Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 5
-rw-r--r-- drivers/acpi/thermal.c | 2
-rw-r--r-- drivers/base/dma-coherent.c | 10
-rw-r--r-- drivers/base/dma-mapping.c | 6
-rw-r--r-- drivers/block/amiflop.c | 2
-rw-r--r-- drivers/block/ataflop.c | 4
-rw-r--r-- drivers/block/cciss.c | 6
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 23
-rw-r--r-- drivers/block/drbd/drbd_int.h | 92
-rw-r--r-- drivers/block/drbd/drbd_main.c | 28
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 485
-rw-r--r-- drivers/block/drbd/drbd_nla.c | 1
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 2
-rw-r--r-- drivers/block/drbd/drbd_protocol.h | 12
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 196
-rw-r--r-- drivers/block/drbd/drbd_req.c | 74
-rw-r--r-- drivers/block/drbd/drbd_req.h | 6
-rw-r--r-- drivers/block/drbd/drbd_state.c | 38
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 107
-rw-r--r-- drivers/block/drbd/drbd_wrappers.h | 54
-rw-r--r-- drivers/block/floppy.c | 20
-rw-r--r-- drivers/block/hd.c | 10
-rw-r--r-- drivers/block/mg_disk.c | 12
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 1087
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 32
-rw-r--r-- drivers/block/null_blk.c | 119
-rw-r--r-- drivers/block/nvme-core.c | 11
-rw-r--r-- drivers/block/paride/pcd.c | 2
-rw-r--r-- drivers/block/paride/pd.c | 4
-rw-r--r-- drivers/block/paride/pf.c | 4
-rw-r--r-- drivers/block/skd_main.c | 12
-rw-r--r-- drivers/block/swim.c | 2
-rw-r--r-- drivers/block/swim3.c | 6
-rw-r--r-- drivers/block/virtio_blk.c | 88
-rw-r--r-- drivers/block/xen-blkback/common.h | 4
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 53
-rw-r--r-- drivers/block/xen-blkfront.c | 44
-rw-r--r-- drivers/block/xsysace.c | 4
-rw-r--r-- drivers/block/z2ram.c | 6
-rw-r--r-- drivers/bus/Kconfig | 18
-rw-r--r-- drivers/bus/Makefile | 3
-rw-r--r-- drivers/bus/brcmstb_gisb.c | 289
-rw-r--r-- drivers/bus/mvebu-mbus.c | 11
-rw-r--r-- drivers/bus/omap_l3_noc.c | 406
-rw-r--r-- drivers/bus/omap_l3_noc.h | 545
-rw-r--r-- drivers/bus/vexpress-config.c | 202
-rw-r--r-- drivers/cdrom/cdrom.c | 1271
-rw-r--r-- drivers/cdrom/gdrom.c | 2
-rw-r--r-- drivers/char/applicom.c | 1
-rw-r--r-- drivers/char/random.c | 1
-rw-r--r-- drivers/clk/Kconfig | 11
-rw-r--r-- drivers/clk/Makefile | 2
-rw-r--r-- drivers/clk/at91/Makefile | 4
-rw-r--r-- drivers/clk/at91/clk-main.c | 577
-rw-r--r-- drivers/clk/at91/clk-slow.c | 467
-rw-r--r-- drivers/clk/at91/pmc.c | 17
-rw-r--r-- drivers/clk/at91/pmc.h | 9
-rw-r--r-- drivers/clk/at91/sckc.c | 57
-rw-r--r-- drivers/clk/at91/sckc.h | 22
-rw-r--r-- drivers/clk/clk-divider.c | 2
-rw-r--r-- drivers/clk/samsung/Kconfig | 26
-rw-r--r-- drivers/clk/samsung/Makefile | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos3250.c | 780
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 51
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 118
-rw-r--r-- drivers/clk/samsung/clk-exynos5260.c | 1980
-rw-r--r-- drivers/clk/samsung/clk-exynos5260.h | 459
-rw-r--r-- drivers/clk/samsung/clk-exynos5410.c | 209
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 1127
-rw-r--r-- drivers/clk/samsung/clk-exynos5440.c | 18
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 489
-rw-r--r-- drivers/clk/samsung/clk-pll.h | 8
-rw-r--r-- drivers/clk/samsung/clk-s3c2410-dclk.c | 440
-rw-r--r-- drivers/clk/samsung/clk-s3c2410.c | 482
-rw-r--r-- drivers/clk/samsung/clk-s3c2412.c | 274
-rw-r--r-- drivers/clk/samsung/clk-s3c2443.c | 466
-rw-r--r-- drivers/clk/samsung/clk-s3c64xx.c | 44
-rw-r--r-- drivers/clk/samsung/clk.c | 123
-rw-r--r-- drivers/clk/samsung/clk.h | 72
-rw-r--r-- drivers/clk/st/clkgen-pll.c | 4
-rw-r--r-- drivers/clk/tegra/clk-pll.c | 64
-rw-r--r-- drivers/clk/ti/clk-43xx.c | 6
-rw-r--r-- drivers/clk/versatile/Kconfig | 26
-rw-r--r-- drivers/clk/versatile/Makefile | 5
-rw-r--r-- drivers/clk/versatile/clk-vexpress-osc.c | 96
-rw-r--r-- drivers/clocksource/Kconfig | 11
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/cadence_ttc_timer.c | 54
-rw-r--r-- drivers/clocksource/exynos_mct.c | 8
-rw-r--r-- drivers/clocksource/sh_cmt.c | 956
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 488
-rw-r--r-- drivers/clocksource/sh_tmu.c | 541
-rw-r--r-- drivers/clocksource/tcb_clksrc.c | 8
-rw-r--r-- drivers/clocksource/timer-marco.c | 10
-rw-r--r-- drivers/clocksource/timer-prima2.c | 47
-rw-r--r-- drivers/clocksource/versatile.c | 40
-rw-r--r-- drivers/connector/connector.c | 17
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 6
-rw-r--r-- drivers/cpufreq/cpufreq-cpu0.c | 16
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r-- drivers/cpufreq/exynos-cpufreq.c | 21
-rw-r--r-- drivers/cpufreq/exynos-cpufreq.h | 38
-rw-r--r-- drivers/cpufreq/exynos4210-cpufreq.c | 39
-rw-r--r-- drivers/cpufreq/exynos4x12-cpufreq.c | 49
-rw-r--r-- drivers/cpufreq/exynos5250-cpufreq.c | 43
-rw-r--r-- drivers/cpufreq/s3c24xx-cpufreq.c | 1
-rw-r--r-- drivers/cpuidle/Kconfig.arm | 11
-rw-r--r-- drivers/cpuidle/Makefile | 2
-rw-r--r-- drivers/cpuidle/cpuidle-armada-370-xp.c | 93
-rw-r--r-- drivers/cpuidle/cpuidle-exynos.c | 99
-rw-r--r-- drivers/dma/dmaengine.c | 2
-rw-r--r-- drivers/dma/dw/core.c | 11
-rw-r--r-- drivers/dma/edma.c | 335
-rw-r--r-- drivers/dma/mv_xor.c | 8
-rw-r--r-- drivers/dma/sa11x0-dma.c | 4
-rw-r--r-- drivers/edac/edac_mc.c | 2
-rw-r--r-- drivers/edac/i82875p_edac.c | 8
-rw-r--r-- drivers/edac/mce_amd.c | 2
-rw-r--r-- drivers/extcon/Kconfig | 4
-rw-r--r-- drivers/extcon/extcon-adc-jack.c | 49
-rw-r--r-- drivers/extcon/extcon-arizona.c | 40
-rw-r--r-- drivers/extcon/extcon-class.c | 151
-rw-r--r-- drivers/extcon/extcon-gpio.c | 37
-rw-r--r-- drivers/extcon/extcon-max14577.c | 199
-rw-r--r-- drivers/extcon/extcon-max77693.c | 23
-rw-r--r-- drivers/extcon/extcon-max8997.c | 16
-rw-r--r-- drivers/extcon/extcon-palmas.c | 41
-rw-r--r-- drivers/firewire/core.h | 4
-rw-r--r-- drivers/firewire/ohci.c | 2
-rw-r--r-- drivers/gpio/Kconfig | 20
-rw-r--r-- drivers/gpio/Makefile | 2
-rw-r--r-- drivers/gpio/devres.c | 43
-rw-r--r-- drivers/gpio/gpio-adp5520.c | 4
-rw-r--r-- drivers/gpio/gpio-adp5588.c | 4
-rw-r--r-- drivers/gpio/gpio-bt8xx.c | 19
-rw-r--r-- drivers/gpio/gpio-davinci.c | 4
-rw-r--r-- drivers/gpio/gpio-dwapb.c | 38
-rw-r--r-- drivers/gpio/gpio-em.c | 3
-rw-r--r-- drivers/gpio/gpio-ep93xx.c | 34
-rw-r--r-- drivers/gpio/gpio-ge.c | 174
-rw-r--r-- drivers/gpio/gpio-generic.c | 9
-rw-r--r-- drivers/gpio/gpio-grgpio.c | 2
-rw-r--r-- drivers/gpio/gpio-janz-ttl.c | 38
-rw-r--r-- drivers/gpio/gpio-kempld.c | 4
-rw-r--r-- drivers/gpio/gpio-lynxpoint.c | 4
-rw-r--r-- drivers/gpio/gpio-max730x.c | 5
-rw-r--r-- drivers/gpio/gpio-mcp23s08.c | 17
-rw-r--r-- drivers/gpio/gpio-moxart.c | 4
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 13
-rw-r--r-- drivers/gpio/gpio-omap.c | 205
-rw-r--r-- drivers/gpio/gpio-palmas.c | 6
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 102
-rw-r--r-- drivers/gpio/gpio-pcf857x.c | 11
-rw-r--r-- drivers/gpio/gpio-pch.c | 1
-rw-r--r-- drivers/gpio/gpio-pl061.c | 27
-rw-r--r-- drivers/gpio/gpio-rc5t583.c | 4
-rw-r--r-- drivers/gpio/gpio-rcar.c | 9
-rw-r--r-- drivers/gpio/gpio-rdc321x.c | 23
-rw-r--r-- drivers/gpio/gpio-sch.c | 26
-rw-r--r-- drivers/gpio/gpio-sch311x.c | 16
-rw-r--r-- drivers/gpio/gpio-spear-spics.c | 4
-rw-r--r-- drivers/gpio/gpio-sx150x.c | 1
-rw-r--r-- drivers/gpio/gpio-tc3589x.c | 148
-rw-r--r-- drivers/gpio/gpio-tegra.c | 6
-rw-r--r-- drivers/gpio/gpio-timberdale.c | 47
-rw-r--r-- drivers/gpio/gpio-tps6586x.c | 4
-rw-r--r-- drivers/gpio/gpio-tps65910.c | 4
-rw-r--r-- drivers/gpio/gpio-xilinx.c | 2
-rw-r--r-- drivers/gpio/gpio-zevio.c | 18
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 5
-rw-r--r-- drivers/gpio/gpiolib-of.c | 16
-rw-r--r-- drivers/gpio/gpiolib.c | 101
-rw-r--r-- drivers/gpio/gpiolib.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 130
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r-- drivers/hsi/Kconfig | 1
-rw-r--r-- drivers/hsi/Makefile | 1
-rw-r--r-- drivers/hsi/clients/Kconfig | 17
-rw-r--r-- drivers/hsi/clients/Makefile | 4
-rw-r--r-- drivers/hsi/clients/hsi_char.c | 14
-rw-r--r-- drivers/hsi/clients/nokia-modem.c | 285
-rw-r--r-- drivers/hsi/clients/ssi_protocol.c | 1191
-rw-r--r-- drivers/hsi/controllers/Kconfig | 19
-rw-r--r-- drivers/hsi/controllers/Makefile | 6
-rw-r--r-- drivers/hsi/controllers/omap_ssi.c | 625
-rw-r--r-- drivers/hsi/controllers/omap_ssi.h | 166
-rw-r--r-- drivers/hsi/controllers/omap_ssi_port.c | 1399
-rw-r--r-- drivers/hsi/controllers/omap_ssi_regs.h | 171
-rw-r--r-- drivers/hsi/hsi.c | 275
-rw-r--r-- drivers/hv/channel.c | 19
-rw-r--r-- drivers/hv/channel_mgmt.c | 52
-rw-r--r-- drivers/hv/connection.c | 39
-rw-r--r-- drivers/hv/hv.c | 2
-rw-r--r-- drivers/hv/hv_balloon.c | 29
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 5
-rw-r--r-- drivers/hwmon/Kconfig | 2
-rw-r--r-- drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r-- drivers/hwmon/vexpress.c | 17
-rw-r--r-- drivers/ide/ide-disk.c | 5
-rw-r--r-- drivers/iio/adc/at91_adc.c | 340
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/pxa27x_keypad.c | 7
-rw-r--r-- drivers/input/mouse/Kconfig | 2
-rw-r--r-- drivers/input/mouse/synaptics.c | 166
-rw-r--r-- drivers/input/serio/ambakmi.c | 3
-rw-r--r-- drivers/input/touchscreen/Kconfig | 14
-rw-r--r-- drivers/input/touchscreen/Makefile | 1
-rw-r--r-- drivers/input/touchscreen/atmel_tsadcc.c | 358
-rw-r--r-- drivers/iommu/exynos-iommu.c | 14
-rw-r--r-- drivers/irqchip/irq-armada-370-xp.c | 24
-rw-r--r-- drivers/irqchip/irq-orion.c | 4
-rw-r--r-- drivers/irqchip/irq-sirfsoc.c | 3
-rw-r--r-- drivers/leds/Kconfig | 8
-rw-r--r-- drivers/leds/Makefile | 1
-rw-r--r-- drivers/leds/leds-versatile.c | 110
-rw-r--r-- drivers/mcb/mcb-core.c | 20
-rw-r--r-- drivers/mcb/mcb-pci.c | 17
-rw-r--r-- drivers/md/dm-cache-target.c | 2
-rw-r--r-- drivers/md/dm-mpath.c | 14
-rw-r--r-- drivers/md/dm-thin.c | 12
-rw-r--r-- drivers/md/dm.c | 1
-rw-r--r-- drivers/memory/mvebu-devbus.c | 229
-rw-r--r-- drivers/mfd/Kconfig | 21
-rw-r--r-- drivers/mfd/Makefile | 2
-rw-r--r-- drivers/mfd/max14577.c | 315
-rw-r--r-- drivers/mfd/syscon.c | 4
-rw-r--r-- drivers/mfd/twl-core.c | 15
-rw-r--r-- drivers/mfd/vexpress-config.c | 287
-rw-r--r-- drivers/mfd/vexpress-sysreg.c | 554
-rw-r--r-- drivers/misc/Kconfig | 14
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/arm-charlcd.c | 7
-rw-r--r-- drivers/misc/ds1682.c | 5
-rw-r--r-- drivers/misc/genwqe/card_debugfs.c | 4
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 4
-rw-r--r-- drivers/misc/mei/amthif.c | 2
-rw-r--r-- drivers/misc/mei/bus.c | 4
-rw-r--r-- drivers/misc/mei/client.c | 90
-rw-r--r-- drivers/misc/mei/hbm.c | 97
-rw-r--r-- drivers/misc/mei/hbm.h | 2
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 9
-rw-r--r-- drivers/misc/mei/hw-me.c | 322
-rw-r--r-- drivers/misc/mei/hw-me.h | 15
-rw-r--r-- drivers/misc/mei/hw-txe-regs.h | 2
-rw-r--r-- drivers/misc/mei/hw-txe.c | 111
-rw-r--r-- drivers/misc/mei/hw-txe.h | 21
-rw-r--r-- drivers/misc/mei/hw.h | 25
-rw-r--r-- drivers/misc/mei/init.c | 60
-rw-r--r-- drivers/misc/mei/main.c | 1
-rw-r--r-- drivers/misc/mei/mei_dev.h | 101
-rw-r--r-- drivers/misc/mei/pci-me.c | 256
-rw-r--r-- drivers/misc/mei/pci-txe.c | 155
-rw-r--r-- drivers/misc/mei/wd.c | 2
-rw-r--r-- drivers/misc/vexpress-syscfg.c | 324
-rw-r--r-- drivers/mmc/host/mmci.c | 324
-rw-r--r-- drivers/mmc/host/mmci.h | 14
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 3
-rw-r--r-- drivers/mtd/ubi/block.c | 2
-rw-r--r-- drivers/net/bonding/bond_alb.c | 54
-rw-r--r-- drivers/net/bonding/bond_main.c | 134
-rw-r--r-- drivers/net/bonding/bond_options.c | 1
-rw-r--r-- drivers/net/bonding/bonding.h | 1
-rw-r--r-- drivers/net/can/c_can/Kconfig | 7
-rw-r--r-- drivers/net/can/c_can/c_can.c | 36
-rw-r--r-- drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r-- drivers/net/ethernet/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/altera/Makefile | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_msgdma.c | 110
-rw-r--r-- drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 181
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r-- drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r-- drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r-- drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r-- drivers/net/ethernet/jme.c | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 57
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r-- drivers/net/macvlan.c | 18
-rw-r--r-- drivers/net/phy/mdio-gpio.c | 4
-rw-r--r-- drivers/net/phy/phy.c | 16
-rw-r--r-- drivers/net/phy/phy_device.c | 4
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 57
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r-- drivers/net/xen-netback/common.h | 2
-rw-r--r-- drivers/net/xen-netback/interface.c | 30
-rw-r--r-- drivers/net/xen-netback/netback.c | 102
-rw-r--r-- drivers/of/platform.c | 74
-rw-r--r-- drivers/pci/access.c | 12
-rw-r--r-- drivers/pci/bus.c | 11
-rw-r--r-- drivers/pci/host-bridge.c | 1
-rw-r--r-- drivers/pci/host/Kconfig | 13
-rw-r--r-- drivers/pci/host/Makefile | 2
-rw-r--r-- drivers/pci/host/pci-exynos.c | 11
-rw-r--r-- drivers/pci/host/pci-host-generic.c | 388
-rw-r--r-- drivers/pci/host/pci-imx6.c | 147
-rw-r--r-- drivers/pci/host/pci-rcar-gen2.c | 31
-rw-r--r-- drivers/pci/host/pcie-designware.c | 6
-rw-r--r-- drivers/pci/host/pcie-designware.h | 2
-rw-r--r-- drivers/pci/host/pcie-rcar.c | 1008
-rw-r--r-- drivers/pci/hotplug-pci.c | 2
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 6
-rw-r--r-- drivers/pci/hotplug/cpci_hotplug_pci.c | 5
-rw-r--r-- drivers/pci/hotplug/cpqphp_ctrl.c | 3
-rw-r--r-- drivers/pci/hotplug/cpqphp_nvram.c | 1
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_pci.c | 3
-rw-r--r-- drivers/pci/hotplug/pcihp_slot.c | 3
-rw-r--r-- drivers/pci/hotplug/rpadlpar_core.c | 3
-rw-r--r-- drivers/pci/hotplug/rpaphp_core.c | 15
-rw-r--r-- drivers/pci/hotplug/s390_pci_hpc.c | 1
-rw-r--r-- drivers/pci/hotplug/shpchp_pci.c | 5
-rw-r--r-- drivers/pci/iov.c | 2
-rw-r--r-- drivers/pci/msi.c | 96
-rw-r--r-- drivers/pci/pci-acpi.c | 8
-rw-r--r-- drivers/pci/pci-driver.c | 58
-rw-r--r-- drivers/pci/pci-sysfs.c | 68
-rw-r--r-- drivers/pci/pci.c | 34
-rw-r--r-- drivers/pci/pci.h | 10
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 9
-rw-r--r-- drivers/pci/probe.c | 101
-rw-r--r-- drivers/pci/quirks.c | 11
-rw-r--r-- drivers/pci/search.c | 1
-rw-r--r-- drivers/pci/setup-bus.c | 251
-rw-r--r-- drivers/pci/setup-irq.c | 1
-rw-r--r-- drivers/pci/setup-res.c | 42
-rw-r--r-- drivers/pcmcia/cardbus.c | 3
-rw-r--r-- drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 3
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 3
-rw-r--r-- drivers/power/reset/Kconfig | 23
-rw-r--r-- drivers/power/reset/Makefile | 3
-rw-r--r-- drivers/power/reset/axxia-reset.c | 88
-rw-r--r-- drivers/power/reset/keystone-reset.c | 166
-rw-r--r-- drivers/power/reset/sun6i-reboot.c | 85
-rw-r--r-- drivers/power/reset/vexpress-poweroff.c | 16
-rw-r--r-- drivers/ptp/Kconfig | 3
-rw-r--r-- drivers/regulator/Kconfig | 7
-rw-r--r-- drivers/regulator/max14577.c | 277
-rw-r--r-- drivers/regulator/vexpress.c | 50
-rw-r--r-- drivers/reset/Makefile | 1
-rw-r--r-- drivers/reset/reset-socfpga.c | 146
-rw-r--r-- drivers/reset/reset-sunxi.c | 21
-rw-r--r-- drivers/reset/sti/reset-stih415.c | 1
-rw-r--r-- drivers/reset/sti/reset-stih416.c | 1
-rw-r--r-- drivers/sbus/char/jsflash.c | 2
-rw-r--r-- drivers/scsi/atari_scsi.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 5
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 3
-rw-r--r-- drivers/scsi/sd.c | 13
-rw-r--r-- drivers/soc/Kconfig | 5
-rw-r--r-- drivers/soc/Makefile | 5
-rw-r--r-- drivers/soc/qcom/Kconfig | 11
-rw-r--r-- drivers/soc/qcom/Makefile | 1
-rw-r--r-- drivers/soc/qcom/qcom_gsbi.c | 85
-rw-r--r-- drivers/tty/serial/msm_serial.c | 48
-rw-r--r-- drivers/tty/serial/msm_serial.h | 5
-rw-r--r-- drivers/uio/uio.c | 2
-rw-r--r-- drivers/uio/uio_dmem_genirq.c | 4
-rw-r--r-- drivers/usb/phy/phy-isp1301-omap.c | 2
-rw-r--r-- drivers/video/fbdev/atafb.c | 49
-rw-r--r-- drivers/w1/w1.c | 2
-rw-r--r-- drivers/w1/w1.h | 8
-rw-r--r-- drivers/w1/w1_int.c | 4
-rw-r--r-- drivers/w1/w1_netlink.c | 649
-rw-r--r-- drivers/w1/w1_netlink.h | 36
-rw-r--r-- drivers/xen/manage.c | 45
-rw-r--r-- drivers/xen/xen-acpi-processor.c | 4
-rw-r--r-- drivers/xen/xen-pciback/pci_stub.c | 25
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 4
418 files changed, 26908 insertions, 8332 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0a0a90f52d26..0e87a34b6472 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -132,6 +132,8 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+source "drivers/soc/Kconfig"
+
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7183b6af5dac..f98b50d8251d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -33,6 +33,9 @@ obj-y += amba/
# really early.
obj-$(CONFIG_DMADEVICES) += dma/
+# SOC specific infrastructure drivers.
+obj-y += soc/
+
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -80,7 +83,6 @@ obj-$(CONFIG_PCCARD) += pcmcia/
obj-$(CONFIG_DIO) += dio/
obj-$(CONFIG_SBUS) += sbus/
obj-$(CONFIG_ZORRO) += zorro/
-obj-$(CONFIG_MAC) += macintosh/
obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
@@ -138,7 +140,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
-obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
static void __exit acpi_thermal_exit(void)
{
- destroy_workqueue(acpi_thermal_pm_queue);
acpi_bus_unregister_driver(&acpi_thermal_driver);
+ destroy_workqueue(acpi_thermal_pm_queue);
return;
}
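
The hunk above fixes a teardown-ordering bug: the workqueue was destroyed while the driver that queues work onto it was still registered. A minimal sketch of the general rule, assuming a hypothetical example_unregister_users() as the unregister step:

```c
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* assumed created at init */

static void example_unregister_users(void);	/* hypothetical helper */

static void example_teardown(void)
{
	/* Stop every producer of work items first, so nothing can
	 * queue onto example_wq from here on ... */
	example_unregister_users();
	/* ... then it is safe to destroy the queue. In the opposite
	 * order, a late work item would target a freed workqueue. */
	destroy_workqueue(example_wq);
}
```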
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bc256b641027..7d6e84a51424 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,13 +10,13 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
- phys_addr_t pfn_base;
+ unsigned long pfn_base;
int size;
int flags;
unsigned long *bitmap;
};
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void __iomem *mem_base = NULL;
@@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
- mem_base = ioremap(bus_addr, size);
+ mem_base = ioremap(phys_addr, size);
if (!mem_base)
goto out;
@@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
- dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+ dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
*ret = -ENXIO;
if (off < count && user_count <= count - off) {
- unsigned pfn = mem->pfn_base + start + off;
+ unsigned long pfn = mem->pfn_base + start + off;
*ret = remap_pfn_range(vma, vma->vm_start, pfn,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
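
The type changes above reflect that a page frame number, unlike a full physical address, always fits in an unsigned long: PFN_DOWN() simply discards the PAGE_SHIFT low-order bits. A sketch of the conversion, with an illustrative value in the comment:

```c
#include <linux/pfn.h>	/* PFN_DOWN(x) is (x) >> PAGE_SHIFT */

/* Even with a 64-bit phys_addr_t (e.g. ARM LPAE), the resulting frame
 * number is small enough for unsigned long, which is why pfn_base is
 * narrowed in the hunk above. */
static unsigned long example_phys_to_pfn(phys_addr_t phys)
{
	return PFN_DOWN(phys);	/* 0x100000000 -> 0x100000 with 4 KiB pages */
}
```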
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 0ce39a33b3c2..6cd08e145bfa 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
/**
* dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
* @dev: Device to declare coherent memory for
- * @bus_addr: Bus address of coherent memory to be declared
+ * @phys_addr: Physical address of coherent memory to be declared
* @device_addr: Device address of coherent memory to be declared
* @size: Size of coherent memory to be declared
* @flags: Flags
@@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void *res;
@@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
if (!res)
return -ENOMEM;
- rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
+ rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
flags);
if (rc == 0)
devres_add(dev, res);
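
dmam_declare_coherent_memory() follows the usual devres shape visible in the hunk: allocate a release record, run the raw operation, and register the record only on success. A generic sketch of that pattern, with example_acquire()/example_release() as hypothetical stand-ins:

```c
#include <linux/device.h>
#include <linux/gfp.h>

static int example_acquire(struct device *dev);	/* hypothetical raw op */

static void example_release(struct device *dev, void *res)
{
	/* undo example_acquire(); runs automatically on driver detach */
}

static int example_managed_acquire(struct device *dev)
{
	void *res;
	int rc;

	res = devres_alloc(example_release, sizeof(int), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = example_acquire(dev);
	if (rc == 0)
		devres_add(dev, res);	/* tie cleanup to device lifetime */
	else
		devres_free(res);	/* acquisition failed: drop the record */
	return rc;
}
```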
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 748dea4f34dc..758da2287d9a 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1406,7 +1406,7 @@ next_segment:
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
- data = rq->buffer + 512 * cnt;
+ data = bio_data(rq->bio) + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 96b629e1f0c9..2104b1b4ccda 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1484,7 +1484,7 @@ repeat:
ReqCnt = 0;
ReqCmd = rq_data_dir(fd_request);
ReqBlock = blk_rq_pos(fd_request);
- ReqBuffer = fd_request->buffer;
+ ReqBuffer = bio_data(fd_request->bio);
setup_req_params( drive );
do_fd_action( drive );
@@ -1952,7 +1952,7 @@ static int __init atari_floppy_init (void)
goto Enomem;
}
TrackBuffer = DMABuffer + 512;
- PhysDMABuffer = virt_to_phys(DMABuffer);
+ PhysDMABuffer = atari_stram_to_phys(DMABuffer);
PhysTrackBuffer = virt_to_phys(TrackBuffer);
BufferDrive = BufferSide = BufferTrack = -1;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 73894ca33956..4595c22f33f7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4080,7 +4080,7 @@ static void cciss_interrupt_mode(ctlr_info_t *h)
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
+ err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4);
if (!err) {
h->intr[0] = cciss_msix_entries[0].vector;
h->intr[1] = cciss_msix_entries[1].vector;
@@ -4088,10 +4088,6 @@ static void cciss_interrupt_mode(ctlr_info_t *h)
h->intr[3] = cciss_msix_entries[3].vector;
h->msix_vector = 1;
return;
- }
- if (err > 0) {
- dev_warn(&h->pdev->dev,
- "only %d MSI-X vectors available\n", err);
} else {
dev_warn(&h->pdev->dev,
"MSI-X init failed %d\n", err);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 90ae4ba8f9ee..05a1780ffa85 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -29,7 +29,6 @@
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"
-#include "drbd_wrappers.h"
enum al_transaction_types {
@@ -204,7 +203,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
BUG_ON(!bdev->md_bdev);
- drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
+ dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
@@ -276,7 +275,6 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval
return _al_get(device, first, true);
}
-static
bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
@@ -846,7 +844,7 @@ void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size,
int wake_up = 0;
unsigned long flags;
- if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -920,7 +918,7 @@ int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size
if (size == 0)
return 0;
- if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
+ if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
drbd_err(device, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
@@ -1023,8 +1021,7 @@ int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
int i, sig;
- int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
- 200 times -> 20 seconds. */
+ bool sa;
retry:
sig = wait_event_interruptible(device->al_wait,
@@ -1035,12 +1032,15 @@ retry:
if (test_bit(BME_LOCKED, &bm_ext->flags))
return 0;
+ /* step aside only while we are above c-min-rate; unless disabled. */
+ sa = drbd_rs_c_min_rate_throttle(device);
+
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
sig = wait_event_interruptible(device->al_wait,
!_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
- test_bit(BME_PRIORITY, &bm_ext->flags));
+ (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));
- if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
+ if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
spin_lock_irq(&device->al_lock);
if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
@@ -1052,9 +1052,6 @@ retry:
return -EINTR;
if (schedule_timeout_interruptible(HZ/10))
return -EINTR;
- if (sa && --sa == 0)
- drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec."
- "Resync stalled?\n");
goto retry;
}
}
@@ -1288,7 +1285,7 @@ void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e7093d4291f1..a76ceb344d64 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -382,6 +382,12 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
+ /* is this a TRIM aka REQ_DISCARD? */
+ __EE_IS_TRIM,
+ /* our lower level cannot handle trim,
+ * and we want to fall back to zeroout instead */
+ __EE_IS_TRIM_USE_ZEROOUT,
+
/* In case a barrier failed,
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
@@ -405,7 +411,9 @@ enum {
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
+#define EE_IS_TRIM (1<<__EE_IS_TRIM)
+#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT)
+#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
@@ -579,6 +587,7 @@ struct drbd_resource {
struct list_head resources;
struct res_opts res_opts;
struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */
+ struct mutex adm_mutex; /* mutex to serialize administrative requests */
spinlock_t req_lock;
unsigned susp:1; /* IO suspended by user */
@@ -609,6 +618,7 @@ struct drbd_connection {
struct drbd_socket data; /* data/barrier/cstate/parameter packets */
struct drbd_socket meta; /* ping/ack (metadata) packets */
int agreed_pro_version; /* actually used protocol version */
+ u32 agreed_features;
unsigned long last_received; /* in jiffies, either socket */
unsigned int ko_count;
@@ -814,6 +824,28 @@ struct drbd_device {
struct submit_worker submit;
};
+struct drbd_config_context {
+ /* assigned from drbd_genlmsghdr */
+ unsigned int minor;
+ /* assigned from request attributes, if present */
+ unsigned int volume;
+#define VOLUME_UNSPECIFIED (-1U)
+ /* pointer into the request skb,
+ * limited lifetime! */
+ char *resource_name;
+ struct nlattr *my_addr;
+ struct nlattr *peer_addr;
+
+ /* reply buffer */
+ struct sk_buff *reply_skb;
+ /* pointer into reply buffer */
+ struct drbd_genlmsghdr *reply_dh;
+ /* resolved from attributes, if possible */
+ struct drbd_device *device;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
+};
+
static inline struct drbd_device *minor_to_device(unsigned int minor)
{
return (struct drbd_device *)idr_find(&drbd_devices, minor);
@@ -821,7 +853,7 @@ static inline struct drbd_device *minor_to_device(unsigned int minor)
static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
- return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
+ return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}
#define for_each_resource(resource, _resources) \
@@ -1139,6 +1171,12 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
+/* For now, don't allow more than one activity log extent worth of data
+ * to be discarded in one go. We may need to rework drbd_al_begin_io()
+ * to allow for even larger discard ranges */
+#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE
+#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)
+
extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
@@ -1229,9 +1267,9 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
extern int conn_lowest_minor(struct drbd_connection *connection);
-enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr);
+extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
-extern void drbd_delete_device(struct drbd_device *mdev);
+extern void drbd_delete_device(struct drbd_device *device);
extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);
@@ -1257,7 +1295,7 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
-extern int drbd_msg_put_info(const char *info);
+extern int drbd_msg_put_info(struct sk_buff *skb, const char *info);
extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
@@ -1283,6 +1321,10 @@ extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
+/* bi_end_io handlers */
+extern void drbd_md_io_complete(struct bio *bio, int error);
+extern void drbd_peer_request_endio(struct bio *bio, int error);
+extern void drbd_request_endio(struct bio *bio, int error);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
@@ -1332,16 +1374,20 @@ extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
+extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
+
/* drbd_receiver.c */
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_asender(struct drbd_thread *thi);
-extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
+extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
+extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
+ bool,
gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
@@ -1401,6 +1447,37 @@ static inline void drbd_tcp_quickack(struct socket *sock)
(char*)&val, sizeof(val));
}
+/* sets the number of 512 byte sectors of our virtual device */
+static inline void drbd_set_my_capacity(struct drbd_device *device,
+ sector_t size)
+{
+ /* set_capacity(device->this_bdev->bd_disk, size); */
+ set_capacity(device->vdisk, size);
+ device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
+}
+
+/*
+ * used to submit our private bio
+ */
+static inline void drbd_generic_make_request(struct drbd_device *device,
+ int fault_type, struct bio *bio)
+{
+ __release(local);
+ if (!bio->bi_bdev) {
+ printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
+ "bio->bi_bdev == NULL\n",
+ device_to_minor(device));
+ dump_stack();
+ bio_endio(bio, -ENODEV);
+ return;
+ }
+
+ if (drbd_insert_fault(device, fault_type))
+ bio_endio(bio, -EIO);
+ else
+ generic_make_request(bio);
+}
+
void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
/* drbd_proc.c */
@@ -1410,6 +1487,7 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
+extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
@@ -2144,7 +2222,7 @@ static inline void drbd_md_flush(struct drbd_device *device)
static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
- return list_first_entry(&resource->connections,
+ return list_first_entry_or_null(&resource->connections,
struct drbd_connection, connections);
}
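
Two of the hunks above switch first_peer_device() and first_connection() from list_first_entry() to list_first_entry_or_null(); the latter returns NULL for an empty list instead of a pointer computed from the list head itself. A small illustration with a hypothetical item type:

```c
#include <linux/list.h>

struct example_item {
	struct list_head node;
	int value;
};

/* Safe on an empty list: yields NULL rather than a bogus pointer
 * aliasing the list head. */
static struct example_item *example_first(struct list_head *head)
{
	return list_first_entry_or_null(head, struct example_item, node);
}
```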
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 331e5cc1227d..960645c26e6f 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1607,8 +1607,8 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long b
return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
-/* Used to send write requests
- * R_PRIMARY -> Peer (P_DATA)
+/* Used to send write or TRIM aka REQ_DISCARD requests
+ * R_PRIMARY -> Peer (P_DATA, P_TRIM)
*/
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
@@ -1640,6 +1640,16 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
dp_flags |= DP_SEND_WRITE_ACK;
}
p->dp_flags = cpu_to_be32(dp_flags);
+
+ if (dp_flags & DP_DISCARD) {
+ struct p_trim *t = (struct p_trim*)p;
+ t->size = cpu_to_be32(req->i.size);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
+ goto out;
+ }
+
+ /* our digest is still only over the payload.
+ * TRIM does not carry any payload. */
if (dgs)
drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
@@ -1675,6 +1685,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
... Be noisy about digest too large ...
} */
}
+out:
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
@@ -2570,6 +2581,7 @@ struct drbd_resource *drbd_create_resource(const char *name)
INIT_LIST_HEAD(&resource->connections);
list_add_tail_rcu(&resource->resources, &drbd_resources);
mutex_init(&resource->conf_update);
+ mutex_init(&resource->adm_mutex);
spin_lock_init(&resource->req_lock);
return resource;
@@ -2687,14 +2699,16 @@ static int init_submitter(struct drbd_device *device)
return 0;
}
-enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
+ struct drbd_resource *resource = adm_ctx->resource;
struct drbd_connection *connection;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
struct request_queue *q;
int id;
+ int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
device = minor_to_device(minor);
@@ -2763,7 +2777,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
if (id < 0) {
if (id == -ENOSPC) {
err = ERR_MINOR_EXISTS;
- drbd_msg_put_info("requested minor exists already");
+ drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
}
goto out_no_minor_idr;
}
@@ -2773,7 +2787,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
if (id < 0) {
if (id == -ENOSPC) {
err = ERR_MINOR_EXISTS;
- drbd_msg_put_info("requested minor exists already");
+ drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
}
goto out_idr_remove_minor;
}
@@ -2794,7 +2808,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
if (id < 0) {
if (id == -ENOSPC) {
err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
+ drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already");
}
goto out_idr_remove_from_resource;
}
@@ -2803,7 +2817,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
if (init_submitter(device)) {
err = ERR_NOMEM;
- drbd_msg_put_info("unable to create submit workqueue");
+ drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue");
goto out_idr_remove_vol;
}
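
The drbd_nl.c changes that follow let drbd_adm_set_role() drop the global genl_lock(), so drbd_adm_prepare() now pins the objects it resolved with kref_get() and drbd_adm_finish() drops those references. A minimal sketch of that get/put pairing, with example_obj and example_free as hypothetical names:

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
};

static void example_free(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, kref));
}

static void example_use_unlocked(struct example_obj *obj)
{
	kref_get(&obj->kref);	/* object survives while the lock is dropped */
	/* ... release the outer lock, do slow work, retake the lock ... */
	kref_put(&obj->kref, example_free);
}
```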
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 526414bc2cab..1b35c45c92b7 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -34,7 +34,6 @@
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
-#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>
@@ -82,32 +81,6 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
-/* Configuration is strictly serialized, because generic netlink message
- * processing is strictly serialized by the genl_lock().
- * Which means we can use one static global drbd_config_context struct.
- */
-static struct drbd_config_context {
- /* assigned from drbd_genlmsghdr */
- unsigned int minor;
- /* assigned from request attributes, if present */
- unsigned int volume;
-#define VOLUME_UNSPECIFIED (-1U)
- /* pointer into the request skb,
- * limited lifetime! */
- char *resource_name;
- struct nlattr *my_addr;
- struct nlattr *peer_addr;
-
- /* reply buffer */
- struct sk_buff *reply_skb;
- /* pointer into reply buffer */
- struct drbd_genlmsghdr *reply_dh;
- /* resolved from attributes, if possible */
- struct drbd_device *device;
- struct drbd_resource *resource;
- struct drbd_connection *connection;
-} adm_ctx;
-
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
@@ -117,9 +90,8 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
* reason it could fail was no space in skb, and there are 4k available. */
-int drbd_msg_put_info(const char *info)
+int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
- struct sk_buff *skb = adm_ctx.reply_skb;
struct nlattr *nla;
int err = -EMSGSIZE;
@@ -143,42 +115,46 @@ int drbd_msg_put_info(const char *info)
* and per-family private info->pointers.
* But we need to stay compatible with older kernels.
* If it returns successfully, adm_ctx members are valid.
+ *
+ * At this point, we still rely on the global genl_lock().
+ * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
+ * to add additional synchronization against object destruction/modification.
*/
#define DRBD_ADM_NEED_MINOR 1
#define DRBD_ADM_NEED_RESOURCE 2
#define DRBD_ADM_NEED_CONNECTION 4
-static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
- unsigned flags)
+static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
+ struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
struct drbd_genlmsghdr *d_in = info->userhdr;
const u8 cmd = info->genlhdr->cmd;
int err;
- memset(&adm_ctx, 0, sizeof(adm_ctx));
+ memset(adm_ctx, 0, sizeof(*adm_ctx));
/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
return -EPERM;
- adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!adm_ctx.reply_skb) {
+ adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!adm_ctx->reply_skb) {
err = -ENOMEM;
goto fail;
}
- adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+ adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
info, &drbd_genl_family, 0, cmd);
/* put of a few bytes into a fresh skb of >= 4k will always succeed.
* but anyways */
- if (!adm_ctx.reply_dh) {
+ if (!adm_ctx->reply_dh) {
err = -ENOMEM;
goto fail;
}
- adm_ctx.reply_dh->minor = d_in->minor;
- adm_ctx.reply_dh->ret_code = NO_ERROR;
+ adm_ctx->reply_dh->minor = d_in->minor;
+ adm_ctx->reply_dh->ret_code = NO_ERROR;
- adm_ctx.volume = VOLUME_UNSPECIFIED;
+ adm_ctx->volume = VOLUME_UNSPECIFIED;
if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
struct nlattr *nla;
/* parse and validate only */
@@ -188,111 +164,131 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
/* It was present, and valid,
* copy it over to the reply skb. */
- err = nla_put_nohdr(adm_ctx.reply_skb,
+ err = nla_put_nohdr(adm_ctx->reply_skb,
info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
info->attrs[DRBD_NLA_CFG_CONTEXT]);
if (err)
goto fail;
- /* and assign stuff to the global adm_ctx */
+ /* and assign stuff to the adm_ctx */
nla = nested_attr_tb[__nla_type(T_ctx_volume)];
if (nla)
- adm_ctx.volume = nla_get_u32(nla);
+ adm_ctx->volume = nla_get_u32(nla);
nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
if (nla)
- adm_ctx.resource_name = nla_data(nla);
- adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
- adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
- if ((adm_ctx.my_addr &&
- nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
- (adm_ctx.peer_addr &&
- nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
+ adm_ctx->resource_name = nla_data(nla);
+ adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+ adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+ if ((adm_ctx->my_addr &&
+ nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
+ (adm_ctx->peer_addr &&
+ nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
err = -EINVAL;
goto fail;
}
}
- adm_ctx.minor = d_in->minor;
- adm_ctx.device = minor_to_device(d_in->minor);
- if (adm_ctx.resource_name) {
- adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name);
+ adm_ctx->minor = d_in->minor;
+ adm_ctx->device = minor_to_device(d_in->minor);
+
+ /* We are protected by the global genl_lock().
+ * But we may explicitly drop it/retake it in drbd_adm_set_role(),
+ * so make sure this object stays around. */
+ if (adm_ctx->device)
+ kref_get(&adm_ctx->device->kref);
+
+ if (adm_ctx->resource_name) {
+ adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
}
- if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
- drbd_msg_put_info("unknown minor");
+ if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
return ERR_MINOR_INVALID;
}
- if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
- drbd_msg_put_info("unknown resource");
- if (adm_ctx.resource_name)
+ if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
+ if (adm_ctx->resource_name)
return ERR_RES_NOT_KNOWN;
return ERR_INVALID_REQUEST;
}
if (flags & DRBD_ADM_NEED_CONNECTION) {
- if (adm_ctx.resource) {
- drbd_msg_put_info("no resource name expected");
+ if (adm_ctx->resource) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.device) {
- drbd_msg_put_info("no minor number expected");
+ if (adm_ctx->device) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.my_addr && adm_ctx.peer_addr)
- adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
- nla_len(adm_ctx.my_addr),
- nla_data(adm_ctx.peer_addr),
- nla_len(adm_ctx.peer_addr));
- if (!adm_ctx.connection) {
- drbd_msg_put_info("unknown connection");
+ if (adm_ctx->my_addr && adm_ctx->peer_addr)
+ adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
+ nla_len(adm_ctx->my_addr),
+ nla_data(adm_ctx->peer_addr),
+ nla_len(adm_ctx->peer_addr));
+ if (!adm_ctx->connection) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
return ERR_INVALID_REQUEST;
}
}
/* some more paranoia, if the request was over-determined */
- if (adm_ctx.device && adm_ctx.resource &&
- adm_ctx.device->resource != adm_ctx.resource) {
+ if (adm_ctx->device && adm_ctx->resource &&
+ adm_ctx->device->resource != adm_ctx->resource) {
pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
- adm_ctx.minor, adm_ctx.resource->name,
- adm_ctx.device->resource->name);
- drbd_msg_put_info("minor exists in different resource");
+ adm_ctx->minor, adm_ctx->resource->name,
+ adm_ctx->device->resource->name);
+ drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.device &&
- adm_ctx.volume != VOLUME_UNSPECIFIED &&
- adm_ctx.volume != adm_ctx.device->vnr) {
+ if (adm_ctx->device &&
+ adm_ctx->volume != VOLUME_UNSPECIFIED &&
+ adm_ctx->volume != adm_ctx->device->vnr) {
pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
- adm_ctx.minor, adm_ctx.volume,
- adm_ctx.device->vnr,
- adm_ctx.device->resource->name);
- drbd_msg_put_info("minor exists as different volume");
+ adm_ctx->minor, adm_ctx->volume,
+ adm_ctx->device->vnr,
+ adm_ctx->device->resource->name);
+ drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
return ERR_INVALID_REQUEST;
}
+ /* still, provide adm_ctx->resource always, if possible. */
+ if (!adm_ctx->resource) {
+ adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
+ : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
+ if (adm_ctx->resource)
+ kref_get(&adm_ctx->resource->kref);
+ }
+
return NO_ERROR;
fail:
- nlmsg_free(adm_ctx.reply_skb);
- adm_ctx.reply_skb = NULL;
+ nlmsg_free(adm_ctx->reply_skb);
+ adm_ctx->reply_skb = NULL;
return err;
}
-static int drbd_adm_finish(struct genl_info *info, int retcode)
+static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
+ struct genl_info *info, int retcode)
{
- if (adm_ctx.connection) {
- kref_put(&adm_ctx.connection->kref, drbd_destroy_connection);
- adm_ctx.connection = NULL;
+ if (adm_ctx->device) {
+ kref_put(&adm_ctx->device->kref, drbd_destroy_device);
+ adm_ctx->device = NULL;
}
- if (adm_ctx.resource) {
- kref_put(&adm_ctx.resource->kref, drbd_destroy_resource);
- adm_ctx.resource = NULL;
+ if (adm_ctx->connection) {
+ kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
+ adm_ctx->connection = NULL;
+ }
+ if (adm_ctx->resource) {
+ kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
+ adm_ctx->resource = NULL;
}
- if (!adm_ctx.reply_skb)
+ if (!adm_ctx->reply_skb)
return -ENOMEM;
- adm_ctx.reply_dh->ret_code = retcode;
- drbd_adm_send_reply(adm_ctx.reply_skb, info);
+ adm_ctx->reply_dh->ret_code = retcode;
+ drbd_adm_send_reply(adm_ctx->reply_skb, info);
return 0;
}
@@ -426,6 +422,14 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec
}
rcu_read_unlock();
+ if (fp == FP_NOT_AVAIL) {
+ /* IO Suspending works on the whole resource.
+ Do it only for one device. */
+ vnr = 0;
+ peer_device = idr_get_next(&connection->peer_devices, &vnr);
+ drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
+ }
+
return fp;
}
@@ -438,12 +442,13 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
char *ex_to_string;
int r;
+ spin_lock_irq(&connection->resource->req_lock);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ spin_unlock_irq(&connection->resource->req_lock);
return false;
}
- spin_lock_irq(&connection->resource->req_lock);
connect_cnt = connection->connect_cnt;
spin_unlock_irq(&connection->resource->req_lock);
@@ -654,11 +659,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
put_ldev(device);
}
} else {
- mutex_lock(&device->resource->conf_update);
+ /* Called from drbd_adm_set_role only.
+ * We are still holding the conf_update mutex. */
nc = first_peer_device(device)->connection->net_conf;
if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */
- mutex_unlock(&device->resource->conf_update);
set_disk_ro(device->vdisk, false);
if (get_ldev(device)) {
@@ -700,11 +705,12 @@ static const char *from_attrs_err_to_txt(int err)
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct set_role_parms parms;
int err;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -715,17 +721,22 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
err = set_role_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
+ genl_unlock();
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
+
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ genl_lock();
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -1104,15 +1115,18 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_
struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
+ struct request_queue *b = NULL;
if (get_ldev_if_state(device, D_ATTACHING)) {
- struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
+ b = device->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
rcu_read_unlock();
- put_ldev(device);
+
+ blk_set_stacking_limits(&q->limits);
+ blk_queue_max_write_same_sectors(q, 0);
}
blk_queue_logical_block_size(q, 512);
@@ -1121,8 +1135,25 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
- if (get_ldev_if_state(device, D_ATTACHING)) {
- struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
+ if (b) {
+ struct drbd_connection *connection = first_peer_device(device)->connection;
+
+ if (blk_queue_discard(b) &&
+ (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
+ /* For now, don't allow more than one activity log extent worth of data
+ * to be discarded in one go. We may need to rework drbd_al_begin_io()
+ * to allow for even larger discard ranges */
+ q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ /* REALLY? Is stacking secdiscard "legal"? */
+ if (blk_queue_secdiscard(b))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+ } else {
+ q->limits.max_discard_sectors = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
+ }
blk_queue_stack_limits(q, b);
@@ -1164,8 +1195,14 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
else
peer = DRBD_MAX_BIO_SIZE;
- }
+ /* We may later detach and re-attach on a disconnected Primary.
+ * Avoid this setting to jump back in that case.
+ * We want to store what we know the peer DRBD can handle,
+ * not what the peer IO backend can handle. */
+ if (peer > device->peer_max_bio_size)
+ device->peer_max_bio_size = peer;
+ }
new = min(local, peer);
if (device->state.role == R_PRIMARY && new < now)
@@ -1258,19 +1295,21 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int err, fifo_size;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
device = adm_ctx.device;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
/* we also need a disk
* to change the options on */
@@ -1294,7 +1333,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
err = disk_conf_from_attrs_for_change(new_disk_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail_unlock;
}
@@ -1385,12 +1424,15 @@ fail_unlock:
success:
put_ldev(device);
out:
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_device *device;
int err;
enum drbd_ret_code retcode;
@@ -1406,13 +1448,14 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
enum drbd_state_rv rv;
struct net_conf *nc;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
device = adm_ctx.device;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
conn_reconfig_start(first_peer_device(device)->connection);
/* if you want to reconfigure, please tear down first */
@@ -1455,7 +1498,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
err = disk_conf_from_attrs(new_disk_conf, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
@@ -1619,7 +1662,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
if (device->state.conn < C_CONNECTED &&
- device->state.role == R_PRIMARY &&
+ device->state.role == R_PRIMARY && device->ed_uuid &&
(device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
(unsigned long long)device->ed_uuid);
@@ -1797,7 +1840,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device);
conn_reconfig_done(first_peer_device(device)->connection);
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
force_diskless_dec:
@@ -1819,9 +1863,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
kfree(new_disk_conf);
lc_destroy(resync_lru);
kfree(new_plan);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -1860,11 +1904,12 @@ out:
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct detach_parms parms = { };
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -1874,14 +1919,16 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
err = detach_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_detach(adm_ctx.device, parms.force_detach);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2055,6 +2102,7 @@ static void free_crypto(struct crypto *crypto)
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct drbd_connection *connection;
struct net_conf *old_net_conf, *new_net_conf = NULL;
@@ -2063,13 +2111,14 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
int rsr; /* re-sync running */
struct crypto crypto = { };
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
connection = adm_ctx.connection;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
@@ -2084,7 +2133,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
old_net_conf = connection->net_conf;
if (!old_net_conf) {
- drbd_msg_put_info("net conf missing, try connect");
+ drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
retcode = ERR_INVALID_REQUEST;
goto fail;
}
@@ -2096,7 +2145,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
err = net_conf_from_attrs_for_change(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
@@ -2167,12 +2216,15 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
done:
conn_reconfig_done(connection);
out:
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_peer_device *peer_device;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct crypto crypto = { };
@@ -2182,14 +2234,14 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
int i;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
- drbd_msg_put_info("connection endpoint(s) missing");
+ drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
retcode = ERR_INVALID_REQUEST;
goto out;
}
@@ -2215,6 +2267,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
connection = first_connection(adm_ctx.resource);
conn_reconfig_start(connection);
@@ -2235,7 +2288,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
err = net_conf_from_attrs(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
@@ -2284,7 +2337,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
fail:
@@ -2292,8 +2346,9 @@ fail:
kfree(new_net_conf);
conn_reconfig_done(connection);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2356,13 +2411,14 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct disconnect_parms parms;
struct drbd_connection *connection;
enum drbd_state_rv rv;
enum drbd_ret_code retcode;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -2374,18 +2430,20 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
err = disconnect_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */
else
retcode = NO_ERROR;
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2407,6 +2465,7 @@ void resync_after_online_grow(struct drbd_device *device)
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
struct drbd_device *device;
@@ -2417,12 +2476,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
sector_t u_size;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto fail;
+ goto finish;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
@@ -2436,7 +2496,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
err = resize_parms_from_attrs(&rs, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail_ldev;
}
}
@@ -2482,7 +2542,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail_ldev;
}
- if (device->state.conn != C_CONNECTED) {
+ if (device->state.conn != C_CONNECTED && !rs.resize_force) {
retcode = ERR_MD_LAYOUT_CONNECTED;
goto fail_ldev;
}
@@ -2528,7 +2588,9 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
fail:
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
fail_ldev:
@@ -2538,11 +2600,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -2555,33 +2618,37 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
err = set_resource_options(adm_ctx.resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
retcode = ERR_NOMEM;
}
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
@@ -2605,26 +2672,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
union drbd_state mask, union drbd_state val)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = drbd_request_state(adm_ctx.device, mask, val);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2639,15 +2709,17 @@ static int drbd_bmio_set_susp_al(struct drbd_device *device)
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
int retcode; /* drbd_ret_code, drbd_state_rv */
struct drbd_device *device;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
@@ -2674,40 +2746,45 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
union drbd_dev_state s;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
s = adm_ctx.device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
@@ -2717,9 +2794,9 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
retcode = ERR_PAUSE_IS_CLEAR;
}
}
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2730,15 +2807,17 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
drbd_uuid_new_current(device);
@@ -2753,9 +2832,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -2931,10 +3010,11 @@ nla_put_failure:
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -2946,7 +3026,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
return err;
}
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -3133,11 +3213,12 @@ dump:
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct timeout_parms tp;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -3154,17 +3235,18 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
return err;
}
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_device *device;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -3179,10 +3261,12 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
int err = start_ov_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
+
/* w_make_ov_request expects position to be aligned */
device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
device->ov_stop_sector = parms.ov_stop_sector;
@@ -3193,21 +3277,24 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
drbd_resume_io(device);
+
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_device *device;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
struct new_c_uuid_parms args;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -3219,11 +3306,12 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
err = new_c_uuid_parms_from_attrs(&args, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out_nolock;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(device)) {
@@ -3268,22 +3356,24 @@ out_dec:
put_ldev(device);
out:
mutex_unlock(device->state_mutex);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static enum drbd_ret_code
-drbd_check_resource_name(const char *name)
+drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
+ const char *name = adm_ctx->resource_name;
if (!name || !name[0]) {
- drbd_msg_put_info("resource name missing");
+ drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
return ERR_MANDATORY_TAG;
}
/* if we want to use these in sysfs/configfs/debugfs some day,
* we must not allow slashes */
if (strchr(name, '/')) {
- drbd_msg_put_info("invalid resource name");
+ drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
return ERR_INVALID_REQUEST;
}
return NO_ERROR;
@@ -3291,11 +3381,12 @@ drbd_check_resource_name(const char *name)
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
- retcode = drbd_adm_prepare(skb, info, 0);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
@@ -3305,48 +3396,50 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
- retcode = drbd_check_resource_name(adm_ctx.resource_name);
+ retcode = drbd_check_resource_name(&adm_ctx);
if (retcode != NO_ERROR)
goto out;
if (adm_ctx.resource) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST;
- drbd_msg_put_info("resource exists");
+ drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
}
/* else: still NO_ERROR */
goto out;
}
+ /* not yet safe for genl_family.parallel_ops */
if (!conn_create(adm_ctx.resource_name, &res_opts))
retcode = ERR_NOMEM;
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_genlmsghdr *dh = info->userhdr;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
if (dh->minor > MINORMASK) {
- drbd_msg_put_info("requested minor out of range");
+ drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
if (adm_ctx.volume > DRBD_VOLUME_MAX) {
- drbd_msg_put_info("requested volume id out of range");
+ drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
@@ -3360,9 +3453,11 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- retcode = drbd_create_device(adm_ctx.resource, dh->minor, adm_ctx.volume);
+ mutex_lock(&adm_ctx.resource->adm_mutex);
+ retcode = drbd_create_device(&adm_ctx, dh->minor);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
@@ -3383,35 +3478,40 @@ static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_del_minor(adm_ctx.device);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
- drbd_adm_finish(info, retcode);
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_resource *resource;
struct drbd_connection *connection;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
unsigned i;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
resource = adm_ctx.resource;
+ mutex_lock(&resource->adm_mutex);
/* demote */
for_each_connection(connection, resource) {
struct drbd_peer_device *peer_device;
@@ -3419,14 +3519,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to demote");
+ drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
goto out;
}
}
retcode = conn_try_disconnect(connection, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to disconnect");
+ drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
goto out;
}
}
@@ -3435,7 +3535,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
idr_for_each_entry(&resource->devices, device, i) {
retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
- drbd_msg_put_info("failed to detach");
+ drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
goto out;
}
}
@@ -3453,7 +3553,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
- drbd_msg_put_info("failed to delete volume");
+ drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
goto out;
}
}
@@ -3462,25 +3562,28 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
synchronize_rcu();
drbd_free_resource(resource);
retcode = NO_ERROR;
-
out:
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&resource->adm_mutex);
+finish:
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context adm_ctx;
struct drbd_resource *resource;
struct drbd_connection *connection;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
resource = adm_ctx.resource;
+ mutex_lock(&resource->adm_mutex);
for_each_connection(connection, resource) {
if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED;
@@ -3499,7 +3602,9 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
drbd_free_resource(resource);
retcode = NO_ERROR;
out:
- drbd_adm_finish(info, retcode);
+ mutex_unlock(&resource->adm_mutex);
+finish:
+ drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
index fa672b6df8d6..b2d4791498a6 100644
--- a/drivers/block/drbd/drbd_nla.c
+++ b/drivers/block/drbd/drbd_nla.c
@@ -1,4 +1,3 @@
-#include "drbd_wrappers.h"
#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/drbd_genl_api.h>
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2f26e8ffa45b..89736bdbbc70 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -116,7 +116,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
/* ------------------------ ~18s average ------------------------ */
i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
- if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
+ if (dt > 180)
stalled = 1;
if (!dt)
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index 3c04ec0ea333..2da9104a3851 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -54,6 +54,11 @@ enum drbd_packet {
P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
+ /* 0x2e to 0x30 reserved, used in drbd 9 */
+
+ /* REQ_DISCARD. We used "discard" in different contexts before,
+ * which is why I chose TRIM here, to disambiguate. */
+ P_TRIM = 0x31,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -119,6 +124,11 @@ struct p_data {
u32 dp_flags;
} __packed;
+struct p_trim {
+ struct p_data p_data;
+	u32 size;	/* == bio->bi_iter.bi_size */
+} __packed;
+
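
A compilable sketch of the P_TRIM wire format; the p_data field layout (sector, block_id, seq_num, dp_flags) is assumed from context, only the trailing u32 size is visible in this hunk:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_p_data {                  /* assumed layout of the p_data header */
	uint64_t sector;
	uint64_t block_id;
	uint32_t seq_num;
	uint32_t dp_flags;
} __attribute__((packed));

struct sketch_p_trim {
	struct sketch_p_data p_data;
	uint32_t size;                  /* byte count of the discarded range */
} __attribute__((packed));

static void fill_trim(struct sketch_p_trim *t, uint64_t sector, uint32_t bytes)
{
	memset(t, 0, sizeof(*t));
	t->p_data.sector = htobe64(sector); /* all fields travel big-endian */
	t->size = htobe32(bytes);           /* == bi_size on the sending side */
}

int main(void)
{
	struct sketch_p_trim t;

	fill_trim(&t, 2048, 1 << 20);       /* discard 1 MiB at sector 2048 */
	printf("wire size: %zu bytes\n", sizeof(t)); /* 28 */
	return 0;
}
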
/*
* commands which share a struct:
* p_block_ack:
@@ -150,6 +160,8 @@ struct p_block_req {
* ReportParams
*/
+#define FF_TRIM 1
+
struct p_connection_features {
u32 protocol_min;
u32 feature_flags;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 68e3992e8838..b6c8aaf4931b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -46,9 +46,10 @@
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
-
#include "drbd_vli.h"
+#define PRO_FEATURES (FF_TRIM)
+
struct packet_info {
enum drbd_packet cmd;
unsigned int size;
@@ -65,7 +66,7 @@ enum finish_epoch {
static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
-
+static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
@@ -234,9 +235,17 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
* @retry: whether to retry, if not enough pages are available right now
*
* Tries to allocate number pages, first from our own page pool, then from
- * the kernel, unless this allocation would exceed the max_buffers setting.
+ * the kernel.
* Possibly retry until DRBD frees sufficient pages somewhere else.
*
+ * If this allocation would exceed the max_buffers setting, we throttle
+ * allocation (schedule_timeout) to give the system some room to breathe.
+ *
+ * We do not use max-buffers as hard limit, because it could lead to
+ * congestion and further to a distributed deadlock during online-verify or
+ * (checksum based) resync, if the max-buffers, socket buffer sizes and
+ * resync-rate settings are mis-configured.
+ *
* Returns a page chain linked via page->private.
*/
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
@@ -246,10 +255,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
struct page *page = NULL;
struct net_conf *nc;
DEFINE_WAIT(wait);
- int mxb;
+ unsigned int mxb;
- /* Yes, we may run up to @number over max_buffers. If we
- * follow it strictly, the admin will get it wrong anyways. */
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000;
@@ -277,7 +284,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
break;
}
- schedule();
+ if (schedule_timeout(HZ/10) == 0)
+ mxb = UINT_MAX;
}
finish_wait(&drbd_pp_wait, &wait);
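
A userspace model of the soft limit above: stay below max-buffers while the peer makes progress, but a wait that times out lifts the limit so the receiver cannot deadlock on a mis-tuned setting. The flag argument stands in for schedule_timeout():

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int allocated;

static bool try_alloc(unsigned int number, unsigned int *mxb, bool made_progress)
{
	if (allocated + number <= *mxb) {
		allocated += number;
		return true;
	}
	if (!made_progress)      /* models schedule_timeout(HZ/10) returning 0 */
		*mxb = UINT_MAX; /* lift the soft limit rather than deadlock */
	return false;            /* caller loops and retries */
}

int main(void)
{
	unsigned int mxb = 4;

	allocated = 4;                             /* pool already at the limit */
	printf("%d\n", try_alloc(2, &mxb, false)); /* 0: throttled, limit lifted */
	printf("%d\n", try_alloc(2, &mxb, false)); /* 1: succeeds on retry */
	return 0;
}
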
@@ -331,7 +339,7 @@ You must not have the req_lock:
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
- unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
+ unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
@@ -348,7 +356,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
- if (data_size) {
+ if (has_payload && data_size) {
page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
@@ -1026,24 +1034,27 @@ randomize:
if (drbd_send_protocol(connection) == -EOPNOTSUPP)
return -1;
+ /* Prevent a race between resync-handshake and
+ * being promoted to Primary.
+ *
+ * Grab and release the state mutex, so we know that any current
+ * drbd_set_role() is finished, and any incoming drbd_set_role
+ * will see the STATE_SENT flag, and wait for it to be cleared.
+ */
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ mutex_lock(peer_device->device->state_mutex);
+
set_bit(STATE_SENT, &connection->flags);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ mutex_unlock(peer_device->device->state_mutex);
+
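
A pthreads sketch of the barrier idiom introduced above: locking and unlocking every device's state mutex before raising the flag guarantees that in-flight role changes finish first and later ones observe the flag. Plain mutexes stand in for the kernel's:

#include <pthread.h>
#include <stdio.h>

#define NDEV 3

static pthread_mutex_t state_mutex[NDEV];
static int state_sent;

static void send_state_barrier(void)
{
	int i;

	for (i = 0; i < NDEV; i++)
		pthread_mutex_lock(&state_mutex[i]);
	state_sent = 1; /* every later role change now sees this */
	for (i = 0; i < NDEV; i++)
		pthread_mutex_unlock(&state_mutex[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NDEV; i++)
		pthread_mutex_init(&state_mutex[i], NULL);
	send_state_barrier();
	printf("state_sent=%d\n", state_sent);
	return 0;
}
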
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
- /* Prevent a race between resync-handshake and
- * being promoted to Primary.
- *
- * Grab and release the state mutex, so we know that any current
- * drbd_set_role() is finished, and any incoming drbd_set_role
- * will see the STATE_SENT flag, and wait for it to be cleared.
- */
- mutex_lock(device->state_mutex);
- mutex_unlock(device->state_mutex);
-
if (discard_my_data)
set_bit(DISCARD_MY_DATA, &device->flags);
else
@@ -1315,6 +1326,20 @@ int drbd_submit_peer_request(struct drbd_device *device,
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
+ if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
+ /* wait for all pending IO completions, before we start
+ * zeroing things out. */
+ conn_wait_active_ee_empty(first_peer_device(device)->connection);
+ if (blkdev_issue_zeroout(device->ldev->backing_bdev,
+ sector, ds >> 9, GFP_NOIO))
+ peer_req->flags |= EE_WAS_ERROR;
+ drbd_endio_write_sec_final(peer_req);
+ return 0;
+ }
+
+ if (peer_req->flags & EE_IS_TRIM)
+ nr_pages = 0; /* discards don't have any payload. */
+
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
@@ -1326,7 +1351,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
- drbd_err(device, "submit_ee: Allocation of a bio failed\n");
+ drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
@@ -1340,6 +1365,11 @@ next_bio:
bios = bio;
++n_bios;
+ if (rw & REQ_DISCARD) {
+ bio->bi_iter.bi_size = ds;
+ goto submit;
+ }
+
page_chain_for_each(page) {
unsigned len = min_t(unsigned, ds, PAGE_SIZE);
if (!bio_add_page(bio, page, len, 0)) {
@@ -1360,8 +1390,9 @@ next_bio:
sector += len >> 9;
--nr_pages;
}
- D_ASSERT(device, page == NULL);
D_ASSERT(device, ds == 0);
+submit:
+ D_ASSERT(device, page == NULL);
atomic_set(&peer_req->pending_bios, n_bios);
do {
@@ -1490,19 +1521,21 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
* and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
- int data_size) __must_hold(local)
+ struct packet_info *pi) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
const sector_t capacity = drbd_get_capacity(device->this_bdev);
struct drbd_peer_request *peer_req;
struct page *page;
int dgs, ds, err;
+ int data_size = pi->size;
void *dig_in = peer_device->connection->int_dig_in;
void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
+ struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
dgs = 0;
- if (peer_device->connection->peer_integrity_tfm) {
+ if (!trim && peer_device->connection->peer_integrity_tfm) {
dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
@@ -1514,9 +1547,15 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
data_size -= dgs;
}
+ if (trim) {
+ D_ASSERT(peer_device, data_size == 0);
+ data_size = be32_to_cpu(trim->size);
+ }
+
if (!expect(IS_ALIGNED(data_size, 512)))
return NULL;
- if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+ /* prepare for larger trim requests. */
+ if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE))
return NULL;
/* even though we trust our peer,
@@ -1532,11 +1571,11 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO);
if (!peer_req)
return NULL;
- if (!data_size)
+ if (trim)
return peer_req;
ds = data_size;
@@ -1676,12 +1715,12 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
}
static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
- int data_size) __releases(local)
+ struct packet_info *pi) __releases(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
- peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size);
+ peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
if (!peer_req)
goto fail;
@@ -1697,7 +1736,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
list_add(&peer_req->w.list, &device->sync_ee);
spin_unlock_irq(&device->resource->req_lock);
- atomic_add(data_size >> 9, &device->rs_sect_ev);
+ atomic_add(pi->size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
return 0;
@@ -1785,7 +1824,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
* or in drbd_peer_request_endio. */
- err = recv_resync_read(peer_device, sector, pi->size);
+ err = recv_resync_read(peer_device, sector, pi);
} else {
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "Can not write resync data to local disk.\n");
@@ -2196,7 +2235,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
*/
sector = be64_to_cpu(p->sector);
- peer_req = read_in_block(peer_device, p->block_id, sector, pi->size);
+ peer_req = read_in_block(peer_device, p->block_id, sector, pi);
if (!peer_req) {
put_ldev(device);
return -EIO;
@@ -2206,7 +2245,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(dp_flags);
- if (peer_req->pages == NULL) {
+ if (pi->cmd == P_TRIM) {
+ struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
+ peer_req->flags |= EE_IS_TRIM;
+ if (!blk_queue_discard(q))
+ peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
+ D_ASSERT(peer_device, peer_req->i.size > 0);
+ D_ASSERT(peer_device, rw & REQ_DISCARD);
+ D_ASSERT(peer_device, peer_req->pages == NULL);
+ } else if (peer_req->pages == NULL) {
D_ASSERT(device, peer_req->i.size == 0);
D_ASSERT(device, dp_flags & DP_FLUSH);
}
@@ -2242,7 +2289,12 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
update_peer_seq(peer_device, peer_seq);
spin_lock_irq(&device->resource->req_lock);
}
- list_add(&peer_req->w.list, &device->active_ee);
+ /* if we use the zeroout fallback code, we process synchronously
+	 * and we wait for all pending requests, that is, wait for
+ * active_ee to become empty in drbd_submit_peer_request();
+ * better not add ourselves here. */
+ if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0)
+ list_add(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
if (device->state.conn == C_SYNC_TARGET)
@@ -2313,39 +2365,45 @@ out_interrupted:
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
-int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
+bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
{
- struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
- unsigned long db, dt, dbdt;
struct lc_element *tmp;
- int curr_events;
- int throttle = 0;
- unsigned int c_min_rate;
-
- rcu_read_lock();
- c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
- rcu_read_unlock();
+ bool throttle = true;
- /* feature disabled? */
- if (c_min_rate == 0)
- return 0;
+ if (!drbd_rs_c_min_rate_throttle(device))
+ return false;
spin_lock_irq(&device->al_lock);
tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
if (tmp) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
- if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
- spin_unlock_irq(&device->al_lock);
- return 0;
- }
+ if (test_bit(BME_PRIORITY, &bm_ext->flags))
+ throttle = false;
/* Do not slow down if app IO is already waiting for this extent */
}
spin_unlock_irq(&device->al_lock);
+ return throttle;
+}
+
+bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
+{
+ struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+ unsigned long db, dt, dbdt;
+ unsigned int c_min_rate;
+ int curr_events;
+
+ rcu_read_lock();
+ c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
+ rcu_read_unlock();
+
+ /* feature disabled? */
+ if (c_min_rate == 0)
+ return false;
+
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&device->rs_sect_ev);
-
if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
unsigned long rs_left;
int i;
@@ -2368,12 +2426,11 @@ int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
dbdt = Bit2KB(db/dt);
if (dbdt > c_min_rate)
- throttle = 1;
+ return true;
}
- return throttle;
+ return false;
}
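
The throttle arithmetic in isolation, assuming DRBD's usual 4 KiB per bitmap bit; plain integer math, no kernel types:

#include <stdio.h>

static int should_throttle(unsigned long db_bits, unsigned long dt_s,
                           unsigned int c_min_rate_kb)
{
	unsigned long dbdt;

	if (c_min_rate_kb == 0)
		return 0;             /* feature disabled */
	if (dt_s == 0)
		dt_s = 1;             /* avoid division by zero */
	dbdt = (db_bits / dt_s) * 4;  /* Bit2KB(): one bitmap bit ~ 4 KiB */
	return dbdt > c_min_rate_kb;  /* already fast enough: throttle */
}

int main(void)
{
	printf("%d\n", should_throttle(2000, 2, 250)); /* 4000 KiB/s -> 1 */
	printf("%d\n", should_throttle(100, 2, 250));  /* 200 KiB/s -> 0 */
	return 0;
}
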
-
static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
@@ -2436,7 +2493,8 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
+ true /* has real payload */, GFP_NOIO);
if (!peer_req) {
put_ldev(device);
return -ENOMEM;
@@ -3648,6 +3706,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
put_ldev(device);
}
+ device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
+ drbd_reconsider_max_bio_size(device);
+	/* Keep drbd_reconsider_max_bio_size() before drbd_determine_dev_size():
+	   in case we cleared the QUEUE_FLAG_DISCARD from our queue in
+ drbd_reconsider_max_bio_size(), we can be sure that after
+ drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
+
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(device)) {
dd = drbd_determine_dev_size(device, ddsf, NULL);
@@ -3660,9 +3725,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
drbd_set_my_capacity(device, p_size);
}
- device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
- drbd_reconsider_max_bio_size(device);
-
if (get_ldev(device)) {
if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
@@ -4423,6 +4485,7 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
+ [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
};
static void drbdd(struct drbd_connection *connection)
@@ -4630,6 +4693,7 @@ static int drbd_send_features(struct drbd_connection *connection)
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
+ p->feature_flags = cpu_to_be32(PRO_FEATURES);
return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
@@ -4683,10 +4747,14 @@ static int drbd_do_features(struct drbd_connection *connection)
goto incompat;
connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
drbd_info(connection, "Handshake successful: "
"Agreed network protocol version %d\n", connection->agreed_pro_version);
+ drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n",
+ connection->agreed_features & FF_TRIM ? " " : " not ");
+
return 1;
incompat:
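
The feature handshake reduces to intersecting two bit masks; a small sketch, with SKETCH_* names standing in for the kernel symbols:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_FF_TRIM 1u
#define SKETCH_PRO_FEATURES (SKETCH_FF_TRIM)

static uint32_t agree_features(uint32_t mine, uint32_t peers)
{
	return mine & peers; /* only bits both sides advertise survive */
}

int main(void)
{
	uint32_t old_peer = 0;  /* a pre-TRIM peer advertises no features */

	printf("old peer: TRIM %s\n",
	       agree_features(SKETCH_PRO_FEATURES, old_peer) & SKETCH_FF_TRIM
	       ? "on" : "off");
	printf("new peer: TRIM %s\n",
	       agree_features(SKETCH_PRO_FEATURES, SKETCH_PRO_FEATURES) & SKETCH_FF_TRIM
	       ? "on" : "off");
	return 0;
}
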
@@ -4778,6 +4846,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
+ if (pi.size < CHALLENGE_LEN) {
+ drbd_err(connection, "AuthChallenge payload too small.\n");
+ rv = -1;
+ goto fail;
+ }
+
peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
drbd_err(connection, "kmalloc of peers_ch failed\n");
@@ -4791,6 +4865,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
+ if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
+ drbd_err(connection, "Peer presented the same challenge!\n");
+ rv = -1;
+ goto fail;
+ }
+
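
Both new checks in a standalone sketch, assuming the 64-byte challenge length: a too-short challenge cannot be compared safely, and a peer echoing our own challenge back is attempting a reflection attack:

#include <stdio.h>
#include <string.h>

#define SKETCH_CHALLENGE_LEN 64

static int validate_peer_challenge(const unsigned char *mine,
                                   const unsigned char *peers, size_t peers_len)
{
	if (peers_len < SKETCH_CHALLENGE_LEN)
		return -1; /* too small: reject instead of reading garbage */
	if (memcmp(mine, peers, SKETCH_CHALLENGE_LEN) == 0)
		return -1; /* reflection: peer sent our challenge back */
	return 0;
}

int main(void)
{
	unsigned char mine[SKETCH_CHALLENGE_LEN] = { 1 };
	unsigned char peers[SKETCH_CHALLENGE_LEN] = { 2 };

	printf("%d\n", validate_peer_challenge(mine, peers, sizeof(peers))); /* 0 */
	printf("%d\n", validate_peer_challenge(mine, mine, sizeof(mine)));   /* -1 */
	return 0;
}
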
resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3779c8d2875b..09803d0d5207 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -522,6 +522,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
+ case DISCARD_COMPLETED_NOTSUPP:
+ case DISCARD_COMPLETED_WITH_ERROR:
+ /* I'd rather not detach from local disk just because it
+ * failed a REQ_DISCARD. */
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
+ break;
+
case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
@@ -1235,6 +1242,7 @@ void do_submit(struct work_struct *ws)
if (list_empty(&incoming))
break;
+skip_fast_path:
wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending));
/* Maybe more was queued, while we prepared the transaction?
* Try to stuff them into this transaction as well.
@@ -1273,6 +1281,25 @@ void do_submit(struct work_struct *ws)
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+
+ /* If all currently hot activity log extents are kept busy by
+ * incoming requests, we still must not totally starve new
+ * requests to cold extents. In that case, prepare one request
+ * in blocking mode. */
+ list_for_each_entry_safe(req, tmp, &incoming, tl_requests) {
+ list_del_init(&req->tl_requests);
+ req->rq_state |= RQ_IN_ACT_LOG;
+ if (!drbd_al_begin_io_prepare(device, &req->i)) {
+ /* Corresponding extent was hot after all? */
+ drbd_send_and_submit(device, req);
+ } else {
+ /* Found a request to a cold extent.
+ * Put on "pending" list,
+ * and try to cumulate with more. */
+ list_add(&req->tl_requests, &pending);
+ goto skip_fast_path;
+ }
+ }
}
}
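
A toy model of the submit strategy: requests to hot activity-log extents take the fast path, and the first request to a cold extent falls back to the blocking transaction path so it cannot be starved. The AL is reduced to a bit array here:

#include <stdbool.h>
#include <stdio.h>

#define NEXT 8

static bool extent_hot[NEXT] = { [0] = true, [1] = true };

/* returns 0 if the extent was hot (no AL transaction needed) */
static int al_begin_io_prepare(int extent)
{
	return extent_hot[extent] ? 0 : -1;
}

static void submit_incoming(const int *req_extent, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (al_begin_io_prepare(req_extent[i]) == 0) {
			printf("fast path: extent %d\n", req_extent[i]);
		} else {
			/* cold: take the blocking path for this one */
			printf("blocking path: extent %d\n", req_extent[i]);
			extent_hot[req_extent[i]] = true; /* now in the AL */
		}
	}
}

int main(void)
{
	int reqs[] = { 0, 1, 5, 5 };

	submit_incoming(reqs, 4);
	return 0;
}
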
@@ -1326,23 +1353,35 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
return limit;
}
-static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
+static void find_oldest_requests(
+ struct drbd_connection *connection,
+ struct drbd_device *device,
+ struct drbd_request **oldest_req_waiting_for_peer,
+ struct drbd_request **oldest_req_waiting_for_disk)
{
- /* Walk the transfer log,
- * and find the oldest not yet completed request */
struct drbd_request *r;
+ *oldest_req_waiting_for_peer = NULL;
+ *oldest_req_waiting_for_disk = NULL;
list_for_each_entry(r, &connection->transfer_log, tl_requests) {
- if (atomic_read(&r->completion_ref))
- return r;
+ const unsigned s = r->rq_state;
+ if (!*oldest_req_waiting_for_peer
+ && ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE)))
+ *oldest_req_waiting_for_peer = r;
+
+ if (!*oldest_req_waiting_for_disk
+ && (s & RQ_LOCAL_PENDING) && r->device == device)
+ *oldest_req_waiting_for_disk = r;
+
+ if (*oldest_req_waiting_for_peer && *oldest_req_waiting_for_disk)
+ break;
}
- return NULL;
}
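
The single-pass search in miniature: walk the log oldest-first, remember the first match for each of two independent predicates, and stop once both are found:

#include <stddef.h>
#include <stdio.h>

struct req { unsigned state; };

#define SKETCH_RQ_NET_PENDING   0x1
#define SKETCH_RQ_LOCAL_PENDING 0x2

static void find_oldest(struct req *log, int n,
                        struct req **net, struct req **disk)
{
	int i;

	*net = *disk = NULL;
	for (i = 0; i < n; i++) {          /* log[0] is the oldest entry */
		if (!*net && (log[i].state & SKETCH_RQ_NET_PENDING))
			*net = &log[i];
		if (!*disk && (log[i].state & SKETCH_RQ_LOCAL_PENDING))
			*disk = &log[i];
		if (*net && *disk)
			break;             /* both found: stop scanning */
	}
}

int main(void)
{
	struct req log[] = { {0}, {SKETCH_RQ_LOCAL_PENDING}, {SKETCH_RQ_NET_PENDING} };
	struct req *net, *disk;

	find_oldest(log, 3, &net, &disk);
	printf("net=%ld disk=%ld\n", (long)(net - log), (long)(disk - log));
	return 0;
}
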
void request_timer_fn(unsigned long data)
{
struct drbd_device *device = (struct drbd_device *) data;
struct drbd_connection *connection = first_peer_device(device)->connection;
- struct drbd_request *req; /* oldest request */
+ struct drbd_request *req_disk, *req_peer; /* oldest request */
struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
@@ -1366,8 +1405,8 @@ void request_timer_fn(unsigned long data)
now = jiffies;
spin_lock_irq(&device->resource->req_lock);
- req = find_oldest_request(connection);
- if (!req) {
+ find_oldest_requests(connection, device, &req_peer, &req_disk);
+ if (req_peer == NULL && req_disk == NULL) {
spin_unlock_irq(&device->resource->req_lock);
mod_timer(&device->request_timer, now + et);
return;
@@ -1389,19 +1428,26 @@ void request_timer_fn(unsigned long data)
* ~198 days with 250 HZ, we have a window where the timeout would need
* to expire twice (worst case) to become effective. Good enough.
*/
- if (ent && req->rq_state & RQ_NET_PENDING &&
- time_after(now, req->start_time + ent) &&
+ if (ent && req_peer &&
+ time_after(now, req_peer->start_time + ent) &&
!time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device &&
- time_after(now, req->start_time + dt) &&
+ if (dt && req_disk &&
+ time_after(now, req_disk->start_time + dt) &&
!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
}
- nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
+
+ /* Reschedule timer for the nearest not already expired timeout.
+ * Fallback to now + min(effective network timeout, disk timeout). */
+ ent = (ent && req_peer && time_before(now, req_peer->start_time + ent))
+ ? req_peer->start_time + ent : now + et;
+ dt = (dt && req_disk && time_before(now, req_disk->start_time + dt))
+ ? req_disk->start_time + dt : now + et;
+ nt = time_before(ent, dt) ? ent : dt;
spin_unlock_irq(&connection->resource->req_lock);
mod_timer(&device->request_timer, nt);
}
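
The rescheduling rule as plain arithmetic: each watched request contributes its own deadline if still in the future, otherwise now plus the effective timeout, and the timer is armed for the earlier candidate. Jiffies are modeled as longs, a timeout of 0 means not armed:

#include <stdio.h>

static long next_timer(long now, long et,
                       long peer_start, long peer_to,
                       long disk_start, long disk_to)
{
	long ent = (peer_to && now < peer_start + peer_to)
		? peer_start + peer_to : now + et;
	long dnt = (disk_to && now < disk_start + disk_to)
		? disk_start + disk_to : now + et;

	return ent < dnt ? ent : dnt; /* nearest not-yet-expired deadline */
}

int main(void)
{
	/* peer deadline at 150 still pending, disk deadline long expired */
	printf("%ld\n", next_timer(100, 60, 90, 60, 10, 20)); /* -> 150 */
	return 0;
}
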
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index c684c963538e..8566cd5866b4 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
-#include "drbd_wrappers.h"
/* The request callbacks will be called in irq context by the IDE drivers,
and in Softirqs/Tasklets/BH context by the SCSI drivers,
@@ -111,11 +110,14 @@ enum drbd_req_event {
BARRIER_ACKED, /* in protocol A and B */
DATA_RECEIVED, /* (remote read) */
+ COMPLETED_OK,
READ_COMPLETED_WITH_ERROR,
READ_AHEAD_COMPLETED_WITH_ERROR,
WRITE_COMPLETED_WITH_ERROR,
+ DISCARD_COMPLETED_NOTSUPP,
+ DISCARD_COMPLETED_WITH_ERROR,
+
ABORT_DISK_IO,
- COMPLETED_OK,
RESEND,
FAIL_FROZEN_DISK_IO,
RESTART_FROZEN_DISK_IO,
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 1a84345a3868..a5d8aae00e04 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -54,8 +54,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
-static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
- enum sanitize_state_warnings *warn);
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
+ union drbd_state ns, enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
{
@@ -287,7 +287,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
- ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
+ ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
@@ -333,7 +333,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
- ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
+ ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS) {
spin_unlock_irqrestore(&device->resource->req_lock, flags);
@@ -740,8 +740,8 @@ static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_st
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
-static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
- enum sanitize_state_warnings *warn)
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
+ union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
@@ -882,11 +882,13 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
}
if (fp == FP_STONITH &&
- (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
+ (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
+ !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
+ !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
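
A sketch of the edge trigger: the suspend flag is raised only on the transition into the bad condition, not on every state change while it persists, so a later resume is not immediately overridden:

#include <stdbool.h>
#include <stdio.h>

struct st { bool primary, connected, pdsk_ok; };

static bool fencing_condition(struct st s)
{
	return s.primary && !s.connected && !s.pdsk_ok;
}

static bool should_set_susp_fen(struct st os, struct st ns)
{
	/* edge-triggered: only on the transition into the condition */
	return fencing_condition(ns) && !fencing_condition(os);
}

int main(void)
{
	struct st lost = { true, false, false };
	struct st fine = { true, true,  true  };

	printf("%d\n", should_set_susp_fen(fine, lost)); /* 1: peer just lost */
	printf("%d\n", should_set_susp_fen(lost, lost)); /* 0: already handled */
	return 0;
}
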
@@ -958,7 +960,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
os = drbd_read_state(device);
- ns = sanitize_state(device, ns, &ssw);
+ ns = sanitize_state(device, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -1656,7 +1658,7 @@ conn_is_valid_transition(struct drbd_connection *connection, union drbd_state ma
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
os = drbd_read_state(device);
- ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
+ ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
@@ -1718,7 +1720,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union
number_of_volumes++;
os = drbd_read_state(device);
ns = apply_mask_val(os, mask, val);
- ns = sanitize_state(device, ns, NULL);
+ ns = sanitize_state(device, os, ns, NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
@@ -1763,19 +1765,19 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union
static enum drbd_state_rv
_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
- enum drbd_state_rv rv;
+ enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */;
if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
- return SS_CW_SUCCESS;
+ rv = SS_CW_SUCCESS;
if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
- return SS_CW_FAILED_BY_PEER;
+ rv = SS_CW_FAILED_BY_PEER;
- rv = conn_is_valid_transition(connection, mask, val, 0);
- if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
- rv = SS_UNKNOWN_ERROR; /* continue waiting */
+ err = conn_is_valid_transition(connection, mask, val, 0);
+ if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
+ return rv;
- return rv;
+ return err;
}
enum drbd_state_rv
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 2c4ce42c3657..d8f57b6305cd 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -118,7 +118,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
+void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
struct drbd_peer_device *peer_device = peer_req->peer_device;
@@ -150,7 +150,9 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
- if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ /* FIXME do we want to detach for failed REQ_DISCARD?
+ * ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */
+ if (peer_req->flags & EE_WAS_ERROR)
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
@@ -176,10 +178,12 @@ void drbd_peer_request_endio(struct bio *bio, int error)
struct drbd_device *device = peer_req->peer_device->device;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
+ int is_discard = !!(bio->bi_rw & REQ_DISCARD);
if (error && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
- is_write ? "write" : "read", error,
+ is_write ? (is_discard ? "discard" : "write")
+ : "read", error,
(unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
@@ -263,7 +267,12 @@ void drbd_request_endio(struct bio *bio, int error)
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
- what = (bio_data_dir(bio) == WRITE)
+ if (bio->bi_rw & REQ_DISCARD)
+ what = (error == -EOPNOTSUPP)
+ ? DISCARD_COMPLETED_NOTSUPP
+ : DISCARD_COMPLETED_WITH_ERROR;
+ else
+ what = (bio_data_dir(bio) == WRITE)
? WRITE_COMPLETED_WITH_ERROR
: (bio_rw(bio) == READ)
? READ_COMPLETED_WITH_ERROR
@@ -395,7 +404,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
- size, GFP_TRY);
+ size, true /* has real payload */, GFP_TRY);
if (!peer_req)
goto defer;
@@ -492,10 +501,9 @@ struct fifo_buffer *fifo_alloc(int fifo_size)
return fb;
}
-static int drbd_rs_controller(struct drbd_device *device)
+static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
{
struct disk_conf *dc;
- unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
int req_sect; /* Number of sectors to request in this turn */
int correction; /* Number of sectors more we need in the proxy*/
@@ -505,9 +513,6 @@ static int drbd_rs_controller(struct drbd_device *device)
int max_sect;
struct fifo_buffer *plan;
- sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */
- device->rs_in_flight -= sect_in;
-
dc = rcu_dereference(device->ldev->disk_conf);
plan = rcu_dereference(device->rs_plan_s);
@@ -550,11 +555,16 @@ static int drbd_rs_controller(struct drbd_device *device)
static int drbd_rs_number_requests(struct drbd_device *device)
{
- int number;
+ unsigned int sect_in; /* Number of sectors that came in since the last turn */
+ int number, mxb;
+
+ sect_in = atomic_xchg(&device->rs_sect_in, 0);
+ device->rs_in_flight -= sect_in;
rcu_read_lock();
+ mxb = drbd_get_max_buffers(device) / 2;
if (rcu_dereference(device->rs_plan_s)->size) {
- number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9);
+ number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9);
device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
@@ -562,8 +572,14 @@ static int drbd_rs_number_requests(struct drbd_device *device)
}
rcu_read_unlock();
- /* ignore the amount of pending requests, the resync controller should
- * throttle down to incoming reply rate soon enough anyways. */
+ /* Don't have more than "max-buffers"/2 in-flight.
+ * Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
+ * potentially causing a distributed deadlock on congestion during
+ * online-verify or (checksum-based) resync, if max-buffers,
+ * socket buffer sizes and resync rate settings are mis-configured. */
+ if (mxb - device->rs_in_flight < number)
+ number = mxb - device->rs_in_flight;
+
return number;
}
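
Note: the hunk above replaces the old "ignore pending requests" policy with a hard cap of max-buffers/2 resync sectors in flight, to avoid the distributed deadlock described in the new comment. Since the subtraction can go negative, the caller below now checks number <= 0 instead of == 0. A standalone check of the clamp (all numbers are made up, not drbd defaults):

    #include <stdio.h>

    int main(void)
    {
        int mxb = 8000 / 2;       /* drbd_get_max_buffers() / 2, assumed 8000 */
        int rs_in_flight = 3500;  /* sectors already requested */
        int number = 1200;        /* what the fifo controller asked for */

        if (mxb - rs_in_flight < number)
            number = mxb - rs_in_flight;

        printf("requests this turn: %d\n", number);  /* prints 500 */
        return 0;
    }
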
@@ -597,7 +613,7 @@ static int make_resync_request(struct drbd_device *device, int cancel)
max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
number = drbd_rs_number_requests(device);
- if (number == 0)
+ if (number <= 0)
goto requeue;
for (i = 0; i < number; i++) {
@@ -647,7 +663,7 @@ next_sector:
*/
align = 1;
rollback_i = i;
- for (;;) {
+ while (i < number) {
if (size + BM_BLOCK_SIZE > max_bio_size)
break;
@@ -1670,11 +1686,15 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
}
clear_bit(B_RS_H_DONE, &device->flags);
- write_lock_irq(&global_state_lock);
+ /* req_lock: serialize with drbd_send_and_submit() and others
+ * global_state_lock: for stable sync-after dependencies */
+ spin_lock_irq(&device->resource->req_lock);
+ write_lock(&global_state_lock);
/* Did some connection breakage or IO error race with us? */
if (device->state.conn < C_CONNECTED
|| !get_ldev_if_state(device, D_NEGOTIATING)) {
- write_unlock_irq(&global_state_lock);
+ write_unlock(&global_state_lock);
+ spin_unlock_irq(&device->resource->req_lock);
mutex_unlock(device->state_mutex);
return;
}
@@ -1714,7 +1734,8 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
}
_drbd_pause_after(device);
}
- write_unlock_irq(&global_state_lock);
+ write_unlock(&global_state_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (r == SS_SUCCESS) {
/* reset rs_last_bcast when a resync or verify is started,
@@ -1778,34 +1799,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
mutex_unlock(device->state_mutex);
}
-/* If the resource already closed the current epoch, but we did not
- * (because we have not yet seen new requests), we should send the
- * corresponding barrier now. Must be checked within the same spinlock
- * that is used to check for new requests. */
-static bool need_to_send_barrier(struct drbd_connection *connection)
-{
- if (!connection->send.seen_any_write_yet)
- return false;
-
- /* Skip barriers that do not contain any writes.
- * This may happen during AHEAD mode. */
- if (!connection->send.current_epoch_writes)
- return false;
-
- /* ->req_lock is held when requests are queued on
- * connection->sender_work, and put into ->transfer_log.
- * It is also held when ->current_tle_nr is increased.
- * So either there are already new requests queued,
- * and corresponding barriers will be send there.
- * Or nothing new is queued yet, so the difference will be 1.
- */
- if (atomic_read(&connection->current_tle_nr) !=
- connection->send.current_epoch_nr + 1)
- return false;
-
- return true;
-}
-
static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
@@ -1864,12 +1857,22 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
spin_unlock_irq(&connection->resource->req_lock);
break;
}
- send_barrier = need_to_send_barrier(connection);
+
+ /* We found nothing new to do, no to-be-communicated request,
+ * no other work item. We may still need to close the last
+ * epoch. Next incoming request epoch will be connection ->
+ * current transfer log epoch number. If that is different
+ * from the epoch of the last request we communicated, it is
+ * safe to send the epoch separating barrier now.
+ */
+ send_barrier =
+ atomic_read(&connection->current_tle_nr) !=
+ connection->send.current_epoch_nr;
spin_unlock_irq(&connection->resource->req_lock);
- if (send_barrier) {
- drbd_send_barrier(connection);
- connection->send.current_epoch_nr++;
- }
+
+ if (send_barrier)
+ maybe_send_barrier(connection,
+ connection->send.current_epoch_nr + 1);
schedule();
/* may be woken up for other things but new work, too,
* e.g. if the current epoch got closed.
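
Note: the deleted need_to_send_barrier() is folded into a single comparison; if the current transfer-log epoch differs from the epoch of the last communicated request, the idle sender may close the old epoch. maybe_send_barrier() is the helper this patch series introduces; its body below is an assumption reconstructed from how it is called here, not the verbatim drbd code:

    /* Kernel-context sketch, assumed from the call site above. */
    static void maybe_send_barrier(struct drbd_connection *connection,
                                   unsigned int epoch)
    {
        /* re-check: only send if this epoch was not closed yet */
        if (connection->send.current_epoch_nr != epoch) {
            drbd_send_barrier(connection);
            connection->send.current_epoch_nr = epoch;
        }
    }
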
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
deleted file mode 100644
index 3db9ebaf64f6..000000000000
--- a/drivers/block/drbd/drbd_wrappers.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _DRBD_WRAPPERS_H
-#define _DRBD_WRAPPERS_H
-
-#include <linux/ctype.h>
-#include <linux/mm.h>
-#include "drbd_int.h"
-
-/* see get_sb_bdev and bd_claim */
-extern char *drbd_sec_holder;
-
-/* sets the number of 512 byte sectors of our virtual device */
-static inline void drbd_set_my_capacity(struct drbd_device *device,
- sector_t size)
-{
- /* set_capacity(device->this_bdev->bd_disk, size); */
- set_capacity(device->vdisk, size);
- device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
-}
-
-#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
-
-/* bi_end_io handlers */
-extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_peer_request_endio(struct bio *bio, int error);
-extern void drbd_request_endio(struct bio *bio, int error);
-
-/*
- * used to submit our private bio
- */
-static inline void drbd_generic_make_request(struct drbd_device *device,
- int fault_type, struct bio *bio)
-{
- __release(local);
- if (!bio->bi_bdev) {
- printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
- "bio->bi_bdev == NULL\n",
- device_to_minor(device));
- dump_stack();
- bio_endio(bio, -ENODEV);
- return;
- }
-
- if (drbd_insert_fault(device, fault_type))
- bio_endio(bio, -EIO);
- else
- generic_make_request(bio);
-}
-
-#ifndef __CHECKER__
-# undef __cond_lock
-# define __cond_lock(x,c) (c)
-#endif
-
-#endif
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fa9bb742df6e..677db049f55a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
}
if (CT(COMMAND) != FD_READ ||
- raw_cmd->kernel_data == current_req->buffer) {
+ raw_cmd->kernel_data == bio_data(current_req->bio)) {
/* transfer directly from buffer */
cont->done(1);
} else if (CT(COMMAND) == FD_READ) {
@@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void)
raw_cmd->flags &= ~FD_RAW_WRITE;
raw_cmd->flags |= FD_RAW_READ;
COMMAND = FM_MODE(_floppy, FD_READ);
- } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) {
+ } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
unsigned long dma_limit;
int direct, indirect;
@@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void)
*/
max_size = buffer_chain_size();
dma_limit = (MAX_DMA_ADDRESS -
- ((unsigned long)current_req->buffer)) >> 9;
+ ((unsigned long)bio_data(current_req->bio))) >> 9;
if ((unsigned long)max_size > dma_limit)
max_size = dma_limit;
/* 64 kb boundaries */
- if (CROSS_64KB(current_req->buffer, max_size << 9))
+ if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
max_size = (K_64 -
- ((unsigned long)current_req->buffer) %
+ ((unsigned long)bio_data(current_req->bio)) %
K_64) >> 9;
direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
/*
@@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void)
(DP->read_track & (1 << DRS->probed_format)))))) {
max_size = blk_rq_sectors(current_req);
} else {
- raw_cmd->kernel_data = current_req->buffer;
+ raw_cmd->kernel_data = bio_data(current_req->bio);
raw_cmd->length = current_count_sectors << 9;
if (raw_cmd->length == 0) {
DPRINT("%s: zero dma transfer attempted\n", __func__);
@@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void)
raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
raw_cmd->length <<= 9;
if ((raw_cmd->length < current_count_sectors << 9) ||
- (raw_cmd->kernel_data != current_req->buffer &&
+ (raw_cmd->kernel_data != bio_data(current_req->bio) &&
CT(COMMAND) == FD_WRITE &&
(aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
aligned_sector_t < buffer_min)) ||
@@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void)
raw_cmd->length <= 0 || current_count_sectors <= 0) {
DPRINT("fractionary current count b=%lx s=%lx\n",
raw_cmd->length, current_count_sectors);
- if (raw_cmd->kernel_data != current_req->buffer)
+ if (raw_cmd->kernel_data != bio_data(current_req->bio))
pr_info("addr=%d, length=%ld\n",
(int)((raw_cmd->kernel_data -
floppy_track_buffer) >> 9),
@@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void)
return 0;
}
- if (raw_cmd->kernel_data != current_req->buffer) {
+ if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
if (raw_cmd->kernel_data < floppy_track_buffer ||
current_count_sectors < 0 ||
raw_cmd->length < 0 ||
@@ -3812,7 +3812,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
bio.bi_iter.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_iter.bi_sector = 0;
- bio.bi_flags = (1 << BIO_QUIET);
+ bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
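
Note: this and the hd.c/mg_disk.c conversions below drop the deprecated req->buffer field and instead read the data address of the request's first bio segment via bio_data(). A standalone model of what that resolves to (the real helper lives in <linux/bio.h> and additionally checks bio_has_data()):

    #include <stdio.h>

    struct bvec  { char *page; unsigned int off, len; };
    struct bio_m { struct bvec *vec; unsigned int idx; };

    static void *bio_data_m(struct bio_m *bio)
    {
        struct bvec *bv = &bio->vec[bio->idx];
        return bv->page + bv->off;      /* first segment's address */
    }

    int main(void)
    {
        static char page[4096];
        struct bvec v  = { page, 512, 1024 };
        struct bio_m b = { &v, 0 };

        printf("%td\n", (char *)bio_data_m(&b) - page);  /* prints 512 */
        return 0;
    }
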
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index bf397bf108b7..8a290c08262f 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -464,11 +464,11 @@ static void read_intr(void)
ok_to_read:
req = hd_req;
- insw(HD_DATA, req->buffer, 256);
+ insw(HD_DATA, bio_data(req->bio), 256);
#ifdef DEBUG
printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
req->rq_disk->disk_name, blk_rq_pos(req) + 1,
- blk_rq_sectors(req) - 1, req->buffer+512);
+ blk_rq_sectors(req) - 1, bio_data(req->bio)+512);
#endif
if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
@@ -505,7 +505,7 @@ static void write_intr(void)
ok_to_write:
if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
- outsw(HD_DATA, req->buffer, 256);
+ outsw(HD_DATA, bio_data(req->bio), 256);
return;
}
@@ -624,7 +624,7 @@ repeat:
printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
req->rq_disk->disk_name,
req_data_dir(req) == READ ? "read" : "writ",
- cyl, head, sec, nsect, req->buffer);
+ cyl, head, sec, nsect, bio_data(req->bio));
#endif
if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) {
@@ -643,7 +643,7 @@ repeat:
bad_rw_intr();
goto repeat;
}
- outsw(HD_DATA, req->buffer, 256);
+ outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index eb59b1241366..e352cac707e8 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read_one(struct mg_host *host, struct request *req)
{
- u16 *buff = (u16 *)req->buffer;
+ u16 *buff = (u16 *)bio_data(req->bio);
u32 i;
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
@@ -496,7 +496,7 @@ static void mg_read(struct request *req)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+ blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio));
do {
if (mg_wait(host, ATA_DRQ,
@@ -514,7 +514,7 @@ static void mg_read(struct request *req)
static void mg_write_one(struct mg_host *host, struct request *req)
{
- u16 *buff = (u16 *)req->buffer;
+ u16 *buff = (u16 *)bio_data(req->bio);
u32 i;
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
@@ -534,7 +534,7 @@ static void mg_write(struct request *req)
}
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- rem, blk_rq_pos(req), req->buffer);
+ rem, blk_rq_pos(req), bio_data(req->bio));
if (mg_wait(host, ATA_DRQ,
MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
@@ -585,7 +585,7 @@ ok_to_read:
mg_read_one(host, req);
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
+ blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio));
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
@@ -624,7 +624,7 @@ ok_to_write:
/* write 1 sector and set handler if remains */
mg_write_one(host, req);
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+ blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio));
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 59c5abe32f06..abc858b3528b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
@@ -173,60 +174,34 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
return false; /* device present */
}
-/*
- * Obtain an empty command slot.
- *
- * This function needs to be reentrant since it could be called
- * at the same time on multiple CPUs. The allocation of the
- * command slot must be atomic.
- *
- * @port Pointer to the port data structure.
- *
- * return value
- * >= 0 Index of command slot obtained.
- * -1 No command slots available.
- */
-static int get_slot(struct mtip_port *port)
+static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
- int slot, i;
- unsigned int num_command_slots = port->dd->slot_groups * 32;
+ struct request *rq;
- /*
- * Try 10 times, because there is a small race here.
- * that's ok, because it's still cheaper than a lock.
- *
- * Race: Since this section is not protected by lock, same bit
- * could be chosen by different process contexts running in
- * different processor. So instead of costly lock, we are going
- * with loop.
- */
- for (i = 0; i < 10; i++) {
- slot = find_next_zero_bit(port->allocated,
- num_command_slots, 1);
- if ((slot < num_command_slots) &&
- (!test_and_set_bit(slot, port->allocated)))
- return slot;
- }
- dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
+ rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+ return blk_mq_rq_to_pdu(rq);
+}
- mtip_check_surprise_removal(port->dd->pdev);
- return -1;
+static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
+{
+ blk_put_request(blk_mq_rq_from_pdu(cmd));
}
/*
- * Release a command slot.
- *
- * @port Pointer to the port data structure.
- * @tag Tag of command to release
- *
- * return value
- * None
+ * Once we add support for one hctx per mtip group, this will change a bit
*/
-static inline void release_slot(struct mtip_port *port, int tag)
+static struct request *mtip_rq_from_tag(struct driver_data *dd,
+ unsigned int tag)
+{
+ return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag);
+}
+
+static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
+ unsigned int tag)
{
- smp_mb__before_clear_bit();
- clear_bit(tag, port->allocated);
- smp_mb__after_clear_bit();
+ struct request *rq = mtip_rq_from_tag(dd, tag);
+
+ return blk_mq_rq_to_pdu(rq);
}
/*
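
Note: from here on the driver stops tracking tags in its own port->allocated bitmap; blk-mq owns the tag space. A completed hardware tag is resolved back to its request with blk_mq_tag_to_rq() and to the driver's per-request data with blk_mq_rq_to_pdu(). Condensed from the completion hunks that follow (kernel context, not standalone):

    static void complete_tag(struct driver_data *dd, unsigned int tag, int status)
    {
        struct request *rq = blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag);
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

        dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
                     cmd->direction);
        blk_mq_end_io(rq, status ? -EIO : 0);
    }
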
@@ -248,93 +223,28 @@ static inline void release_slot(struct mtip_port *port, int tag)
* None
*/
static void mtip_async_complete(struct mtip_port *port,
- int tag,
- void *data,
- int status)
+ int tag, struct mtip_cmd *cmd, int status)
{
- struct mtip_cmd *cmd;
- struct driver_data *dd = data;
- int unaligned, cb_status = status ? -EIO : 0;
- void (*func)(void *, int);
+ struct driver_data *dd = port->dd;
+ struct request *rq;
if (unlikely(!dd) || unlikely(!port))
return;
- cmd = &port->commands[tag];
-
if (unlikely(status == PORT_IRQ_TF_ERR)) {
dev_warn(&port->dd->pdev->dev,
"Command tag %d failed due to TFE\n", tag);
}
- /* Clear the active flag */
- atomic_set(&port->commands[tag].active, 0);
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
- /* Upper layer callback */
- func = cmd->async_callback;
- if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) {
+ rq = mtip_rq_from_tag(dd, tag);
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev,
- cmd->sg,
- cmd->scatter_ents,
- cmd->direction);
+ if (unlikely(cmd->unaligned))
+ up(&port->cmd_slot_unal);
- func(cmd->async_data, cb_status);
- unaligned = cmd->unaligned;
-
- /* Clear the allocated bit for the command */
- release_slot(port, tag);
-
- if (unlikely(unaligned))
- up(&port->cmd_slot_unal);
- else
- up(&port->cmd_slot);
- }
-}
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- * None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
- int tag = 0;
- struct mtip_cmd *cmd;
- struct mtip_port *port = dd->port;
- unsigned int num_cmd_slots = dd->slot_groups * 32;
-
- if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
- return;
-
- if (!port)
- return;
-
- cmd = &port->commands[MTIP_TAG_INTERNAL];
- if (atomic_read(&cmd->active))
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
- (1 << MTIP_TAG_INTERNAL))
- if (cmd->comp_func)
- cmd->comp_func(port, MTIP_TAG_INTERNAL,
- cmd->comp_data, -ENODEV);
-
- while (1) {
- tag = find_next_bit(port->allocated, num_cmd_slots, tag);
- if (tag >= num_cmd_slots)
- break;
-
- cmd = &port->commands[tag];
- if (atomic_read(&cmd->active))
- mtip_async_complete(port, tag, dd, -ENODEV);
- }
-
- set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+ blk_mq_end_io(rq, status ? -EIO : 0);
}
/*
@@ -388,8 +298,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
int group = tag >> 5;
- atomic_set(&port->commands[tag].active, 1);
-
/* guard SACT and CI registers */
spin_lock(&port->cmd_issue_lock[group]);
writel((1 << MTIP_TAG_BIT(tag)),
@@ -397,10 +305,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
spin_unlock(&port->cmd_issue_lock[group]);
-
- /* Set the command's timeout value.*/
- port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
- MTIP_NCQ_COMMAND_TIMEOUT_MS);
}
/*
@@ -648,132 +552,13 @@ static void print_tags(struct driver_data *dd,
memset(tagmap, 0, sizeof(tagmap));
for (group = SLOTBITS_IN_LONGS; group > 0; group--)
- tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
+ tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
tagbits[group-1]);
dev_warn(&dd->pdev->dev,
"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
/*
- * Called periodically to see if any read/write commands are
- * taking too long to complete.
- *
- * @data Pointer to the PORT data structure.
- *
- * return value
- * None
- */
-static void mtip_timeout_function(unsigned long int data)
-{
- struct mtip_port *port = (struct mtip_port *) data;
- struct host_to_dev_fis *fis;
- struct mtip_cmd *cmd;
- int unaligned, tag, cmdto_cnt = 0;
- unsigned int bit, group;
- unsigned int num_command_slots;
- unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
- void (*func)(void *, int);
-
- if (unlikely(!port))
- return;
-
- if (unlikely(port->dd->sr))
- return;
-
- if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
- mod_timer(&port->cmd_timer,
- jiffies + msecs_to_jiffies(30000));
- return;
- }
- /* clear the tag accumulator */
- memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
- num_command_slots = port->dd->slot_groups * 32;
-
- for (tag = 0; tag < num_command_slots; tag++) {
- /*
- * Skip internal command slot as it has
- * its own timeout mechanism
- */
- if (tag == MTIP_TAG_INTERNAL)
- continue;
-
- if (atomic_read(&port->commands[tag].active) &&
- (time_after(jiffies, port->commands[tag].comp_time))) {
- group = tag >> 5;
- bit = tag & 0x1F;
-
- cmd = &port->commands[tag];
- fis = (struct host_to_dev_fis *) cmd->command;
-
- set_bit(tag, tagaccum);
- cmdto_cnt++;
- if (cmdto_cnt == 1)
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
- /*
- * Clear the completed bit. This should prevent
- * any interrupt handlers from trying to retire
- * the command.
- */
- writel(1 << bit, port->completed[group]);
-
- /* Clear the active flag for the command */
- atomic_set(&port->commands[tag].active, 0);
-
- func = cmd->async_callback;
- if (func &&
- cmpxchg(&cmd->async_callback, func, 0) == func) {
-
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&port->dd->pdev->dev,
- cmd->sg,
- cmd->scatter_ents,
- cmd->direction);
-
- func(cmd->async_data, -EIO);
- unaligned = cmd->unaligned;
-
- /* Clear the allocated bit for the command. */
- release_slot(port, tag);
-
- if (unaligned)
- up(&port->cmd_slot_unal);
- else
- up(&port->cmd_slot);
- }
- }
- }
-
- if (cmdto_cnt) {
- print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
- if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- mtip_device_reset(port->dd);
- wake_up_interruptible(&port->svc_wait);
- }
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- }
-
- if (port->ic_pause_timer) {
- to = port->ic_pause_timer + msecs_to_jiffies(1000);
- if (time_after(jiffies, to)) {
- if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- port->ic_pause_timer = 0;
- clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
- clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
- clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
- }
-
-
- }
- }
-
- /* Restart the timer */
- mod_timer(&port->cmd_timer,
- jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
-}
-
-/*
* Internal command completion callback function.
*
* This function is normally called by the driver ISR when an internal
@@ -789,28 +574,19 @@ static void mtip_timeout_function(unsigned long int data)
* None
*/
static void mtip_completion(struct mtip_port *port,
- int tag,
- void *data,
- int status)
+ int tag, struct mtip_cmd *command, int status)
{
- struct mtip_cmd *command = &port->commands[tag];
- struct completion *waiting = data;
+ struct completion *waiting = command->comp_data;
if (unlikely(status == PORT_IRQ_TF_ERR))
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
- command->async_callback = NULL;
- command->comp_func = NULL;
-
complete(waiting);
}
static void mtip_null_completion(struct mtip_port *port,
- int tag,
- void *data,
- int status)
+ int tag, struct mtip_cmd *command, int status)
{
- return;
}
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
@@ -842,19 +618,16 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- /* Stop the timer to prevent command timeouts. */
- del_timer(&port->cmd_timer);
set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
- cmd = &port->commands[MTIP_TAG_INTERNAL];
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
- atomic_inc(&cmd->active); /* active > 1 indicates error */
if (cmd->comp_data && cmd->comp_func) {
cmd->comp_func(port, MTIP_TAG_INTERNAL,
- cmd->comp_data, PORT_IRQ_TF_ERR);
+ cmd, PORT_IRQ_TF_ERR);
}
goto handle_tfe_exit;
}
@@ -866,6 +639,8 @@ static void mtip_handle_tfe(struct driver_data *dd)
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
+ dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
+
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
@@ -879,15 +654,11 @@ static void mtip_handle_tfe(struct driver_data *dd)
if (tag == MTIP_TAG_INTERNAL)
continue;
- cmd = &port->commands[tag];
+ cmd = mtip_cmd_from_tag(dd, tag);
if (likely(cmd->comp_func)) {
set_bit(tag, tagaccum);
cmd_cnt++;
- atomic_set(&cmd->active, 0);
- cmd->comp_func(port,
- tag,
- cmd->comp_data,
- 0);
+ cmd->comp_func(port, tag, cmd, 0);
} else {
dev_err(&port->dd->pdev->dev,
"Missing completion func for tag %d",
@@ -947,11 +718,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
for (bit = 0; bit < 32; bit++) {
reissue = 1;
tag = (group << 5) + bit;
- cmd = &port->commands[tag];
-
- /* If the active bit is set re-issue the command */
- if (atomic_read(&cmd->active) == 0)
- continue;
+ cmd = mtip_cmd_from_tag(dd, tag);
fis = (struct host_to_dev_fis *)cmd->command;
@@ -970,11 +737,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
tag,
fail_reason != NULL ?
fail_reason : "unknown");
- atomic_set(&cmd->active, 0);
if (cmd->comp_func) {
cmd->comp_func(port, tag,
- cmd->comp_data,
- -ENODATA);
+ cmd, -ENODATA);
}
continue;
}
@@ -997,14 +762,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
/* Retire a command that will not be reissued */
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- atomic_set(&cmd->active, 0);
if (cmd->comp_func)
- cmd->comp_func(
- port,
- tag,
- cmd->comp_data,
- PORT_IRQ_TF_ERR);
+ cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
else
dev_warn(&port->dd->pdev->dev,
"Bad completion for tag %d\n",
@@ -1017,9 +777,6 @@ handle_tfe_exit:
/* clear eh_active */
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
-
- mod_timer(&port->cmd_timer,
- jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
}
/*
@@ -1048,15 +805,10 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
if (unlikely(tag == MTIP_TAG_INTERNAL))
continue;
- command = &port->commands[tag];
- /* make internal callback */
- if (likely(command->comp_func)) {
- command->comp_func(
- port,
- tag,
- command->comp_data,
- 0);
- } else {
+ command = mtip_cmd_from_tag(dd, tag);
+ if (likely(command->comp_func))
+ command->comp_func(port, tag, command, 0);
+ else {
dev_dbg(&dd->pdev->dev,
"Null completion for tag %d",
tag);
@@ -1081,16 +833,13 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
struct mtip_port *port = dd->port;
- struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
+ struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
(cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))) {
if (cmd->comp_func) {
- cmd->comp_func(port,
- MTIP_TAG_INTERNAL,
- cmd->comp_data,
- 0);
+ cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
return;
}
}
@@ -1103,8 +852,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
*/
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
- if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
- mtip_handle_tfe(dd);
if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
dev_warn(&dd->pdev->dev,
@@ -1122,6 +869,12 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
dev_warn(&dd->pdev->dev,
"Port stat errors %x unhandled\n",
(port_stat & ~PORT_IRQ_HANDLED));
+ if (mtip_check_surprise_removal(dd->pdev))
+ return;
+ }
+ if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
}
}
@@ -1222,7 +975,6 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance)
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
- atomic_set(&port->commands[tag].active, 1);
writel(1 << MTIP_TAG_BIT(tag),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
@@ -1280,6 +1032,8 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
unsigned int n;
unsigned int active = 1;
+ blk_mq_stop_hw_queues(port->dd->queue);
+
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
@@ -1287,8 +1041,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
msleep(20);
continue; /* svc thd is actively issuing commands */
}
+
+ msleep(100);
+ if (mtip_check_surprise_removal(port->dd->pdev))
+ goto err_fault;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- return -EFAULT;
+ goto err_fault;
+
/*
* Ignore s_active bit 0 of array element 0.
* This bit will always be set
@@ -1299,11 +1058,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
if (!active)
break;
-
- msleep(20);
} while (time_before(jiffies, to));
+ blk_mq_start_stopped_hw_queues(port->dd->queue, true);
return active ? -EBUSY : 0;
+err_fault:
+ blk_mq_start_stopped_hw_queues(port->dd->queue, true);
+ return -EFAULT;
}
/*
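
Note: mtip_quiesce_io() now parks the blk-mq hardware queues for the whole polling loop and restarts them on every exit path, including the new surprise-removal check. The pairing rule in isolation (kernel context; wait_for_outstanding() stands in for the s_active poll loop above):

    static int quiesce_pattern(struct request_queue *q)
    {
        int ret;

        blk_mq_stop_hw_queues(q);        /* no further ->queue_rq() calls */
        ret = wait_for_outstanding();    /* stand-in for the poll loop */
        blk_mq_start_stopped_hw_queues(q, true);
        return ret;
    }
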
@@ -1335,10 +1096,9 @@ static int mtip_exec_internal_command(struct mtip_port *port,
{
struct mtip_cmd_sg *command_sg;
DECLARE_COMPLETION_ONSTACK(wait);
- int rv = 0, ready2go = 1;
- struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
- unsigned long to;
+ struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
+ int rv = 0;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1346,19 +1106,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
return -EFAULT;
}
- to = jiffies + msecs_to_jiffies(timeout);
- do {
- ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
- port->allocated);
- if (ready2go)
- break;
- mdelay(100);
- } while (time_before(jiffies, to));
- if (!ready2go) {
- dev_warn(&dd->pdev->dev,
- "Internal cmd active. new cmd [%02X]\n", fis->command);
- return -EBUSY;
- }
+ int_cmd = mtip_get_int_command(dd);
+
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
port->ic_pause_timer = 0;
@@ -1368,10 +1117,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (atomic == GFP_KERNEL) {
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
- if (mtip_quiesce_io(port, 5000) < 0) {
+ if (mtip_quiesce_io(port,
+ MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
- release_slot(port, MTIP_TAG_INTERNAL);
+ mtip_put_int_command(dd, int_cmd);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
return -EBUSY;
@@ -1416,9 +1166,9 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (atomic == GFP_KERNEL) {
/* Wait for the command to complete or timeout. */
- if (wait_for_completion_interruptible_timeout(
+ if ((rv = wait_for_completion_interruptible_timeout(
&wait,
- msecs_to_jiffies(timeout)) <= 0) {
+ msecs_to_jiffies(timeout))) <= 0) {
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
"Internal command [%02X] was interrupted after %lu ms\n",
@@ -1497,8 +1247,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
- atomic_set(&int_cmd->active, 0);
- release_slot(port, MTIP_TAG_INTERNAL);
+ mtip_put_int_command(dd, int_cmd);
if (rv >= 0 && mtip_pause_ncq(port, fis)) {
/* NCQ paused */
return rv;
@@ -1529,6 +1278,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len)
be16_to_cpus(&buf[i]);
}
+static void mtip_set_timeout(struct driver_data *dd,
+ struct host_to_dev_fis *fis,
+ unsigned int *timeout, u8 erasemode)
+{
+ switch (fis->command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ *timeout = 120000; /* 2 minutes */
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case 0xFC:
+ if (erasemode)
+ *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+ else
+ *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ *timeout = 120000; /* 2 minutes */
+ break;
+ case 0xF7:
+ case 0xFA:
+ *timeout = 60000; /* 60 seconds */
+ break;
+ case ATA_CMD_SMART:
+ *timeout = 15000; /* 15 seconds */
+ break;
+ default:
+ *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
+ break;
+ }
+}
+
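
Note: mtip_set_timeout() is only moved up here so the internal-command paths can call it (the deleted copy appears further down). For secure erase, ATA identify words 89 (normal) and 90 (enhanced) report the erase time in 2-minute units, hence the word * 2 * 60000 ms computation. A standalone check with an assumed identify value:

    #include <stdio.h>

    int main(void)
    {
        unsigned short word89 = 30;                   /* assumed: 30 x 2 min */
        unsigned int timeout_ms = word89 * 2 * 60000;

        printf("%u ms = %u min\n", timeout_ms, timeout_ms / 60000);
        /* prints 3600000 ms = 60 min */
        return 0;
    }
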
/*
* Request the device identity information.
*
@@ -1576,7 +1356,7 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
sizeof(u16) * ATA_ID_WORDS,
0,
GFP_KERNEL,
- MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
+ MTIP_INT_CMD_TIMEOUT_MS)
< 0) {
rv = -1;
goto out;
@@ -1644,6 +1424,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
int rv;
struct host_to_dev_fis fis;
unsigned long start;
+ unsigned int timeout;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1651,6 +1432,8 @@ static int mtip_standby_immediate(struct mtip_port *port)
fis.opts = 1 << 7;
fis.command = ATA_CMD_STANDBYNOW1;
+ mtip_set_timeout(port->dd, &fis, &timeout, 0);
+
start = jiffies;
rv = mtip_exec_internal_command(port,
&fis,
@@ -1659,7 +1442,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
0,
0,
GFP_ATOMIC,
- 15000);
+ timeout);
dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
jiffies_to_msecs(jiffies - start));
if (rv)
@@ -1705,7 +1488,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
sectors * ATA_SECT_SIZE,
0,
GFP_ATOMIC,
- MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
+ MTIP_INT_CMD_TIMEOUT_MS);
}
/*
@@ -1998,6 +1781,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
{
struct host_to_dev_fis fis;
struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+ unsigned int to;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -2011,6 +1795,8 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
fis.cyl_hi = command[5];
fis.device = command[6] & ~0x10; /* Clear the dev bit*/
+ mtip_set_timeout(port->dd, &fis, &to, 0);
+
dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
__func__,
command[0],
@@ -2029,7 +1815,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
0,
0,
GFP_KERNEL,
- MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
+ to) < 0) {
return -1;
}
@@ -2069,6 +1855,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
u8 *buf = NULL;
dma_addr_t dma_addr = 0;
int rv = 0, xfer_sz = command[3];
+ unsigned int to;
if (xfer_sz) {
if (!user_buffer)
@@ -2100,6 +1887,8 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
fis.cyl_hi = 0xC2;
}
+ mtip_set_timeout(port->dd, &fis, &to, 0);
+
if (xfer_sz)
reply = (port->rxfis + RX_FIS_PIO_SETUP);
else
@@ -2122,7 +1911,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
GFP_KERNEL,
- MTIP_IOCTL_COMMAND_TIMEOUT_MS)
+ to)
< 0) {
rv = -EFAULT;
goto exit_drive_command;
@@ -2202,36 +1991,6 @@ static unsigned int implicit_sector(unsigned char command,
}
return rv;
}
-static void mtip_set_timeout(struct driver_data *dd,
- struct host_to_dev_fis *fis,
- unsigned int *timeout, u8 erasemode)
-{
- switch (fis->command) {
- case ATA_CMD_DOWNLOAD_MICRO:
- *timeout = 120000; /* 2 minutes */
- break;
- case ATA_CMD_SEC_ERASE_UNIT:
- case 0xFC:
- if (erasemode)
- *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
- else
- *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
- break;
- case ATA_CMD_STANDBYNOW1:
- *timeout = 120000; /* 2 minutes */
- break;
- case 0xF7:
- case 0xFA:
- *timeout = 60000; /* 60 seconds */
- break;
- case ATA_CMD_SMART:
- *timeout = 15000; /* 15 seconds */
- break;
- default:
- *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
- break;
- }
-}
/*
* Executes a taskfile
@@ -2606,22 +2365,21 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* return value
* None
*/
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
- int nsect, int nents, int tag, void *callback,
- void *data, int dir, int unaligned)
+static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+ struct mtip_cmd *command, int nents,
+ struct blk_mq_hw_ctx *hctx)
{
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
- struct mtip_cmd *command = &port->commands[tag];
- int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- u64 start = sector;
+ int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ u64 start = blk_rq_pos(rq);
+ unsigned int nsect = blk_rq_sectors(rq);
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
command->scatter_ents = nents;
- command->unaligned = unaligned;
/*
* The number of retries for this command before it is
* reported as a failure to the upper layers.
@@ -2632,8 +2390,10 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
fis = command->command;
fis->type = 0x27;
fis->opts = 1 << 7;
- fis->command =
- (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
+ if (rq_data_dir(rq) == READ)
+ fis->command = ATA_CMD_FPDMA_READ;
+ else
+ fis->command = ATA_CMD_FPDMA_WRITE;
fis->lba_low = start & 0xFF;
fis->lba_mid = (start >> 8) & 0xFF;
fis->lba_hi = (start >> 16) & 0xFF;
@@ -2643,14 +2403,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
- fis->sect_count = ((tag << 3) | (tag >> 5));
+ fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
fis->sect_cnt_ex = 0;
fis->control = 0;
fis->res2 = 0;
fis->res3 = 0;
fill_command_sg(dd, command, nents);
- if (unaligned)
+ if (command->unaligned)
fis->device |= 1 << 7;
/* Populate the command header */
@@ -2668,81 +2428,17 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
command->direction = dma_dir;
/*
- * Set the completion function and data for the command passed
- * from the upper layer.
- */
- command->async_data = data;
- command->async_callback = callback;
-
- /*
* To prevent this command from being issued
* if an internal command is in progress or error handling is active.
*/
if (port->flags & MTIP_PF_PAUSE_IO) {
- set_bit(tag, port->cmds_to_issue);
+ set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
return;
}
/* Issue the command to the hardware */
- mtip_issue_ncq_command(port, tag);
-
- return;
-}
-
-/*
- * Release a command slot.
- *
- * @dd Pointer to the driver data structure.
- * @tag Slot tag
- *
- * return value
- * None
- */
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
- int unaligned)
-{
- struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
- &dd->port->cmd_slot;
- release_slot(dd->port, tag);
- up(sem);
-}
-
-/*
- * Obtain a command slot and return its associated scatter list.
- *
- * @dd Pointer to the driver data structure.
- * @tag Pointer to an int that will receive the allocated command
- * slot tag.
- *
- * return value
- * Pointer to the scatter list for the allocated command slot
- * or NULL if no command slots are available.
- */
-static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
- int *tag, int unaligned)
-{
- struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
- &dd->port->cmd_slot;
-
- /*
- * It is possible that, even with this semaphore, a thread
- * may think that no command slots are available. Therefore, we
- * need to make an attempt to get_slot().
- */
- down(sem);
- *tag = get_slot(dd->port);
-
- if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
- up(sem);
- return NULL;
- }
- if (unlikely(*tag < 0)) {
- up(sem);
- return NULL;
- }
-
- return dd->port->commands[*tag].sg;
+ mtip_issue_ncq_command(port, rq->tag);
}
/*
@@ -3113,6 +2809,7 @@ static int mtip_free_orphan(struct driver_data *dd)
if (dd->queue) {
dd->queue->queuedata = NULL;
blk_cleanup_queue(dd->queue);
+ blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
}
}
@@ -3270,6 +2967,11 @@ static int mtip_service_thread(void *data)
int ret;
while (1) {
+ if (kthread_should_stop() ||
+ test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+ goto st_out;
+ clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
/*
* the condition is to check neither an internal command is
* is in progress nor error handling is active
@@ -3277,11 +2979,12 @@ static int mtip_service_thread(void *data)
wait_event_interruptible(port->svc_wait, (port->flags) &&
!(port->flags & MTIP_PF_PAUSE_IO));
- if (kthread_should_stop())
- goto st_out;
-
set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ if (kthread_should_stop() ||
+ test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+ goto st_out;
+
/* If I am an orphan, start self cleanup */
if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
break;
@@ -3290,6 +2993,16 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+restart_eh:
+ /* Demux bits: start with error handling */
+ if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
+ mtip_handle_tfe(dd);
+ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+ }
+
+ if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
+ goto restart_eh;
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -3319,16 +3032,14 @@ static int mtip_service_thread(void *data)
}
clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
- } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+ }
+
+ if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
if (mtip_ftl_rebuild_poll(dd) < 0)
set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
&dd->dd_flag);
clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
- clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
-
- if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
- goto st_out;
}
/* wait for pci remove to exit */
@@ -3365,7 +3076,6 @@ st_out:
*/
static void mtip_dma_free(struct driver_data *dd)
{
- int i;
struct mtip_port *port = dd->port;
if (port->block1)
@@ -3376,13 +3086,6 @@ static void mtip_dma_free(struct driver_data *dd)
dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
port->command_list, port->command_list_dma);
}
-
- for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
- if (port->commands[i].command)
- dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
- port->commands[i].command,
- port->commands[i].command_dma);
- }
}
/*
@@ -3396,8 +3099,6 @@ static void mtip_dma_free(struct driver_data *dd)
static int mtip_dma_alloc(struct driver_data *dd)
{
struct mtip_port *port = dd->port;
- int i, rv = 0;
- u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
port->block1 =
@@ -3430,41 +3131,63 @@ static int mtip_dma_alloc(struct driver_data *dd)
port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
- /* Setup per command SGL DMA region */
-
- /* Point the command headers at the command tables */
- for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
- port->commands[i].command =
- dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
- &port->commands[i].command_dma, GFP_KERNEL);
- if (!port->commands[i].command) {
- rv = -ENOMEM;
- mtip_dma_free(dd);
- return rv;
- }
- memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
-
- port->commands[i].command_header = port->command_list +
- (sizeof(struct mtip_cmd_hdr) * i);
- port->commands[i].command_header_dma =
- dd->port->command_list_dma +
- (sizeof(struct mtip_cmd_hdr) * i);
+ return 0;
+}
- if (host_cap_64)
- port->commands[i].command_header->ctbau =
- __force_bit2int cpu_to_le32(
- (port->commands[i].command_dma >> 16) >> 16);
+static int mtip_hw_get_identify(struct driver_data *dd)
+{
+ struct smart_attr attr242;
+ unsigned char *buf;
+ int rv;
- port->commands[i].command_header->ctba =
- __force_bit2int cpu_to_le32(
- port->commands[i].command_dma & 0xFFFFFFFF);
+ if (mtip_get_identify(dd->port, NULL) < 0)
+ return -EFAULT;
- sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
+ return MTIP_FTL_REBUILD_MAGIC;
+ }
+ mtip_dump_identify(dd->port);
- /* Mark command as currently inactive */
- atomic_set(&dd->port->commands[i].active, 0);
+ /* check write protect, over temp and rebuild statuses */
+ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+ dd->port->log_buf,
+ dd->port->log_buf_dma, 1);
+ if (rv) {
+ dev_warn(&dd->pdev->dev,
+ "Error in READ LOG EXT (10h) command\n");
+ /* non-critical error, don't fail the load */
+ } else {
+ buf = (unsigned char *)dd->port->log_buf;
+ if (buf[259] & 0x1) {
+ dev_info(&dd->pdev->dev,
+ "Write protect bit is set.\n");
+ set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xF7) {
+ dev_info(&dd->pdev->dev,
+ "Exceeded Tmax, drive in thermal shutdown.\n");
+ set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xBF) {
+ dev_info(&dd->pdev->dev,
+ "Drive indicates rebuild has failed.\n");
+ /* TODO */
+ }
}
- return 0;
+
+ /* get write protect progress */
+ memset(&attr242, 0, sizeof(struct smart_attr));
+ if (mtip_get_smart_attr(dd->port, 242, &attr242))
+ dev_warn(&dd->pdev->dev,
+ "Unable to check write protect progress\n");
+ else
+ dev_info(&dd->pdev->dev,
+ "Write protect progress: %u%% (%u blocks)\n",
+ attr242.cur, le32_to_cpu(attr242.data));
+
+ return rv;
}
/*
@@ -3481,8 +3204,6 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned int num_command_slots;
unsigned long timeout, timetaken;
- unsigned char *buf;
- struct smart_attr attr242;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
@@ -3513,8 +3234,6 @@ static int mtip_hw_init(struct driver_data *dd)
else
dd->unal_qdepth = 0;
- /* Counting semaphore to track command slot usage */
- sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
/* Spinlock to prevent concurrent issue */
@@ -3599,73 +3318,16 @@ static int mtip_hw_init(struct driver_data *dd)
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
- init_timer(&dd->port->cmd_timer);
init_waitqueue_head(&dd->port->svc_wait);
- dd->port->cmd_timer.data = (unsigned long int) dd->port;
- dd->port->cmd_timer.function = mtip_timeout_function;
- mod_timer(&dd->port->cmd_timer,
- jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
-
-
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
rv = -EFAULT;
goto out3;
}
- if (mtip_get_identify(dd->port, NULL) < 0) {
- rv = -EFAULT;
- goto out3;
- }
- mtip_dump_identify(dd->port);
-
- if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
- MTIP_FTL_REBUILD_MAGIC) {
- set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
- return MTIP_FTL_REBUILD_MAGIC;
- }
-
- /* check write protect, over temp and rebuild statuses */
- rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
- dd->port->log_buf,
- dd->port->log_buf_dma, 1);
- if (rv) {
- dev_warn(&dd->pdev->dev,
- "Error in READ LOG EXT (10h) command\n");
- /* non-critical error, don't fail the load */
- } else {
- buf = (unsigned char *)dd->port->log_buf;
- if (buf[259] & 0x1) {
- dev_info(&dd->pdev->dev,
- "Write protect bit is set.\n");
- set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
- }
- if (buf[288] == 0xF7) {
- dev_info(&dd->pdev->dev,
- "Exceeded Tmax, drive in thermal shutdown.\n");
- set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
- }
- if (buf[288] == 0xBF) {
- dev_info(&dd->pdev->dev,
- "Drive is in security locked state.\n");
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
- }
- }
-
- /* get write protect progess */
- memset(&attr242, 0, sizeof(struct smart_attr));
- if (mtip_get_smart_attr(dd->port, 242, &attr242))
- dev_warn(&dd->pdev->dev,
- "Unable to check write protect progress\n");
- else
- dev_info(&dd->pdev->dev,
- "Write protect progress: %u%% (%u blocks)\n",
- attr242.cur, le32_to_cpu(attr242.data));
return rv;
out3:
- del_timer_sync(&dd->port->cmd_timer);
-
/* Disable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
@@ -3685,6 +3347,22 @@ out1:
return rv;
}
+static void mtip_standby_drive(struct driver_data *dd)
+{
+ if (dd->sr)
+ return;
+
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+ if (mtip_standby_immediate(dd->port))
+ dev_warn(&dd->pdev->dev,
+ "STANDBY IMMEDIATE failed\n");
+}
+
/*
* Called to deinitialize an interface.
*
@@ -3700,12 +3378,6 @@ static int mtip_hw_exit(struct driver_data *dd)
* saves its state.
*/
if (!dd->sr) {
- if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
- dev_warn(&dd->pdev->dev,
- "STANDBY IMMEDIATE failed\n");
-
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3714,8 +3386,6 @@ static int mtip_hw_exit(struct driver_data *dd)
dd->mmio + HOST_CTL);
}
- del_timer_sync(&dd->port->cmd_timer);
-
/* Release the IRQ. */
irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
@@ -4032,100 +3702,138 @@ static const struct block_device_operations mtip_block_ops = {
*
* @queue Pointer to the request queue. Unused other than to obtain
* the driver data structure.
- * @bio Pointer to the BIO.
+ * @rq Pointer to the request.
*
*/
-static void mtip_make_request(struct request_queue *queue, struct bio *bio)
+static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
- struct driver_data *dd = queue->queuedata;
- struct scatterlist *sg;
- struct bio_vec bvec;
- struct bvec_iter iter;
- int nents = 0;
- int tag = 0, unaligned = 0;
+ struct driver_data *dd = hctx->queue->queuedata;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ unsigned int nents;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag))) {
- bio_endio(bio, -ENXIO);
- return;
+ return -ENXIO;
}
if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
- bio_endio(bio, -ENODATA);
- return;
+ return -ENODATA;
}
if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
&dd->dd_flag) &&
- bio_data_dir(bio))) {
- bio_endio(bio, -ENODATA);
- return;
- }
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
- bio_endio(bio, -ENODATA);
- return;
- }
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
- bio_endio(bio, -ENXIO);
- return;
+ rq_data_dir(rq))) {
+ return -ENODATA;
}
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ return -ENODATA;
+ if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+ return -ENXIO;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
- bio_sectors(bio)));
- return;
- }
+ if (rq->cmd_flags & REQ_DISCARD) {
+ int err;
- if (unlikely(!bio_has_data(bio))) {
- blk_queue_flush(queue, 0);
- bio_endio(bio, 0);
- return;
+ err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
+ blk_mq_end_io(rq, err);
+ return 0;
}
- if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
- dd->unal_qdepth) {
- if (bio->bi_iter.bi_sector % 8 != 0)
- /* Unaligned on 4k boundaries */
- unaligned = 1;
- else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
- unaligned = 1;
+ /* Create the scatter list for this request. */
+ nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
+
+ /* Issue the read/write. */
+ mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
+ return 0;
+}
+
+static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct driver_data *dd = hctx->queue->queuedata;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+ return false;
+
+ /*
+ * If unaligned depth must be limited on this controller, mark it
+ * as unaligned if the IO isn't on a 4k boundary (start or length).
+ */
+ if (blk_rq_sectors(rq) <= 64) {
+ if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
+ cmd->unaligned = 1;
}
- sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
- if (likely(sg != NULL)) {
- blk_queue_bounce(queue, &bio);
+ if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
+ return true;
- if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
- dev_warn(&dd->pdev->dev,
- "Maximum number of SGL entries exceeded\n");
- bio_io_error(bio);
- mtip_hw_release_scatterlist(dd, tag, unaligned);
- return;
- }
+ return false;
+}
- /* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, iter) {
- sg_set_page(&sg[nents],
- bvec.bv_page,
- bvec.bv_len,
- bvec.bv_offset);
- nents++;
- }
+static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ int ret;
- /* Issue the read/write. */
- mtip_hw_submit_io(dd,
- bio->bi_iter.bi_sector,
- bio_sectors(bio),
- nents,
- tag,
- bio_endio,
- bio,
- bio_data_dir(bio),
- unaligned);
- } else
- bio_io_error(bio);
+ if (mtip_check_unal_depth(hctx, rq))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ ret = mtip_submit_request(hctx, rq);
+ if (!ret)
+ return BLK_MQ_RQ_QUEUE_OK;
+
+ rq->errors = ret;
+ return BLK_MQ_RQ_QUEUE_ERROR;
+}
+
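
Note: mtip_queue_rq() above relies on the ->queue_rq() status contract: BLK_MQ_RQ_QUEUE_BUSY asks blk-mq to re-dispatch the request later (used when down_trylock() on the unaligned-slot semaphore fails), BLK_MQ_RQ_QUEUE_ERROR completes it with rq->errors, and OK means accepted. The contract reduced to its shape (kernel context; resource_unavailable() and submit() are stand-ins):

    static int queue_rq_shape(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
        if (resource_unavailable(rq))
            return BLK_MQ_RQ_QUEUE_BUSY;   /* requeued by the block layer */
        if (submit(hctx, rq))
            return BLK_MQ_RQ_QUEUE_ERROR;  /* failed; rq->errors reported */
        return BLK_MQ_RQ_QUEUE_OK;
    }
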
+static void mtip_free_cmd(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx)
+{
+ struct driver_data *dd = data;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (!cmd->command)
+ return;
+
+ dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ cmd->command, cmd->command_dma);
+}
+
+static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
+ unsigned int request_idx, unsigned int numa_node)
+{
+ struct driver_data *dd = data;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+ cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ &cmd->command_dma, GFP_KERNEL);
+ if (!cmd->command)
+ return -ENOMEM;
+
+ memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
+
+ /* Point the command headers at the command tables. */
+ cmd->command_header = dd->port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * request_idx);
+ cmd->command_header_dma = dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * request_idx);
+
+ if (host_cap_64)
+ cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
+
+ cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+
+ sg_init_table(cmd->sg, MTIP_MAX_SG);
+ return 0;
}
+static struct blk_mq_ops mtip_mq_ops = {
+ .queue_rq = mtip_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = mtip_init_cmd,
+ .exit_request = mtip_free_cmd,
+};
+
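
Note: with cmd_size set in the tag set (below), blk-mq co-allocates a struct mtip_cmd directly behind every struct request, so init_request/exit_request above only have to manage the DMA-coherent command table. blk_mq_rq_to_pdu() then boils down to pointer arithmetic, roughly:

    /* Roughly what blk_mq_rq_to_pdu() computes in this kernel:
     * the pdu is laid out immediately after the request itself.
     * [ struct request | cmd_size bytes of driver pdu ] */
    static inline void *rq_to_pdu_sketch(struct request *rq)
    {
        return rq + 1;
    }
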
/*
* Block layer initialization function.
*
@@ -4148,11 +3856,7 @@ static int mtip_block_initialize(struct driver_data *dd)
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
- /* Initialize the protocol layer. */
- wait_for_rebuild = mtip_hw_init(dd);
- if (wait_for_rebuild < 0) {
- dev_err(&dd->pdev->dev,
- "Protocol layer initialization failed\n");
+ if (mtip_hw_init(dd)) {
rv = -EINVAL;
goto protocol_init_error;
}
@@ -4194,29 +3898,53 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
- /*
- * if rebuild pending, start the service thread, and delay the block
- * queue creation and add_disk()
- */
- if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
- goto start_service_thread;
-
skip_create_disk:
- /* Allocate the request queue. */
- dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
- if (dd->queue == NULL) {
+ memset(&dd->tags, 0, sizeof(dd->tags));
+ dd->tags.ops = &mtip_mq_ops;
+ dd->tags.nr_hw_queues = 1;
+ dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
+ dd->tags.reserved_tags = 1;
+ dd->tags.cmd_size = sizeof(struct mtip_cmd);
+ dd->tags.numa_node = dd->numa_node;
+ dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+ dd->tags.driver_data = dd;
+
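+ /*
+ * reserved_tags = 1 presumably keeps one slot back for the driver's
+ * internal (non-filesystem) commands; blk_mq_alloc_tag_set() also
+ * preallocates the requests, calling mtip_init_cmd() for each one.
+ */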
+ rv = blk_mq_alloc_tag_set(&dd->tags);
+ if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
- /* Attach our request function to the request queue. */
- blk_queue_make_request(dd->queue, mtip_make_request);
+ /* Allocate the request queue. */
+ dd->queue = blk_mq_init_queue(&dd->tags);
+ if (IS_ERR(dd->queue)) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate request queue\n");
+ rv = -ENOMEM;
+ goto block_queue_alloc_init_error;
+ }
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+ /* Initialize the protocol layer. */
+ wait_for_rebuild = mtip_hw_get_identify(dd);
+ if (wait_for_rebuild < 0) {
+ dev_err(&dd->pdev->dev,
+ "Protocol layer initialization failed\n");
+ rv = -EINVAL;
+ goto init_hw_cmds_error;
+ }
+
+ /*
+ * if rebuild pending, start the service thread, and delay the block
+ * queue creation and add_disk()
+ */
+ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+ goto start_service_thread;
+
/* Set device limits. */
set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
@@ -4295,8 +4023,9 @@ kthread_run_error:
del_gendisk(dd->disk);
read_capacity_error:
+init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
-
+ blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
@@ -4345,6 +4074,9 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
+
+ mtip_standby_drive(dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4357,6 +4089,7 @@ static int mtip_block_remove(struct driver_data *dd)
if (dd->disk->queue) {
del_gendisk(dd->disk);
blk_cleanup_queue(dd->queue);
+ blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
} else
put_disk(dd->disk);
@@ -4391,6 +4124,8 @@ static int mtip_block_remove(struct driver_data *dd)
*/
static int mtip_block_shutdown(struct driver_data *dd)
{
+ mtip_hw_shutdown(dd);
+
/* Delete our gendisk structure, and cleanup the blk queue. */
if (dd->disk) {
dev_info(&dd->pdev->dev,
@@ -4399,6 +4134,7 @@ static int mtip_block_shutdown(struct driver_data *dd)
if (dd->disk->queue) {
del_gendisk(dd->disk);
blk_cleanup_queue(dd->queue);
+ blk_mq_free_tag_set(&dd->tags);
} else
put_disk(dd->disk);
dd->disk = NULL;
@@ -4408,8 +4144,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
-
- mtip_hw_shutdown(dd);
return 0;
}
@@ -4479,6 +4213,57 @@ static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);
+static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
+{
+ int pos;
+ unsigned short pcie_dev_ctrl;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pos) {
+ pci_read_config_word(pdev,
+ pos + PCI_EXP_DEVCTL,
+ &pcie_dev_ctrl);
+ if (pcie_dev_ctrl & (PCI_EXP_DEVCTL_NOSNOOP_EN |
+ PCI_EXP_DEVCTL_RELAX_EN)) {
+ dev_info(&dd->pdev->dev,
+ "Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
+ pdev->vendor, pdev->device);
+ pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
+ PCI_EXP_DEVCTL_RELAX_EN);
+ pci_write_config_word(pdev,
+ pos + PCI_EXP_DEVCTL,
+ pcie_dev_ctrl);
+ }
+ }
+}
+
+static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
+{
+ /*
+ * This workaround is specific to AMD/ATI chipset with a PCI upstream
+ * device with device id 0x5aXX
+ */
+ if (pdev->bus && pdev->bus->self) {
+ if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
+ ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
+ mtip_disable_link_opts(dd, pdev->bus->self);
+ } else {
+ /* Check further up the topology */
+ struct pci_dev *parent_dev = pdev->bus->self;
+ if (parent_dev->bus &&
+ parent_dev->bus->parent &&
+ parent_dev->bus->parent->self &&
+ parent_dev->bus->parent->self->vendor ==
+ PCI_VENDOR_ID_ATI &&
+ (parent_dev->bus->parent->self->device &
+ 0xff00) == 0x5a00) {
+ mtip_disable_link_opts(dd,
+ parent_dev->bus->parent->self);
+ }
+ }
+ }
+}
+
/*
* Called for each supported PCI device detected.
*
@@ -4630,6 +4415,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto msi_initialize_err;
}
+ mtip_fix_ero_nosnoop(dd, pdev);
+
/* Initialize the block layer. */
rv = mtip_block_initialize(dd);
if (rv < 0) {
@@ -4710,8 +4497,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- /* Cleanup the outstanding commands */
- mtip_command_cleanup(dd);
/* Clean up the block layer. */
mtip_block_remove(dd);
@@ -4737,8 +4522,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
- pci_dev_put(pdev);
-
}
/*
@@ -4935,13 +4718,13 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
- debugfs_remove_recursive(dfs_parent);
-
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
/* Unregister the PCI driver. */
pci_unregister_driver(&mtip_pci_driver);
+
+ debugfs_remove_recursive(dfs_parent);
}
MODULE_AUTHOR("Micron Technology, Inc");
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index ffb955e7ccb9..4b9b554234bc 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -40,9 +40,11 @@
#define MTIP_MAX_RETRIES 2
/* Various timeout values in ms */
-#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
-#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000
-#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000
+#define MTIP_NCQ_CMD_TIMEOUT_MS 15000
+#define MTIP_IOCTL_CMD_TIMEOUT_MS 5000
+#define MTIP_INT_CMD_TIMEOUT_MS 5000
+#define MTIP_QUIESCE_IO_TIMEOUT_MS (MTIP_NCQ_CMD_TIMEOUT_MS * \
+ (MTIP_MAX_RETRIES + 1))
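+/* Worst case: 15000 ms per NCQ command * (2 retries + 1) = 45 seconds */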
/* check for timeouts every 500ms */
#define MTIP_TIMEOUT_CHECK_PERIOD 500
@@ -331,12 +333,8 @@ struct mtip_cmd {
*/
void (*comp_func)(struct mtip_port *port,
int tag,
- void *data,
+ struct mtip_cmd *cmd,
int status);
- /* Additional callback function that may be called by comp_func() */
- void (*async_callback)(void *data, int status);
-
- void *async_data; /* Addl. data passed to async_callback() */
int scatter_ents; /* Number of scatter list entries used */
@@ -347,10 +345,6 @@ struct mtip_cmd {
int retries; /* The number of retries left for this command. */
int direction; /* Data transfer direction */
-
- unsigned long comp_time; /* command completion time, in jiffies */
-
- atomic_t active; /* declares if this command sent to the drive. */
};
/* Structure used to describe a port. */
@@ -436,12 +430,6 @@ struct mtip_port {
* or error handling is active
*/
unsigned long cmds_to_issue[SLOTBITS_IN_LONGS];
- /*
- * Array of command slots. Structure includes pointers to the
- * command header and command table, and completion function and data
- * pointers.
- */
- struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS];
/* Used by mtip_service_thread to wait for an event */
wait_queue_head_t svc_wait;
/*
@@ -452,13 +440,7 @@ struct mtip_port {
/*
* Timer used to complete commands that have been active for too long.
*/
- struct timer_list cmd_timer;
unsigned long ic_pause_timer;
- /*
- * Semaphore used to block threads if there are no
- * command slots available.
- */
- struct semaphore cmd_slot;
/* Semaphore to control queue depth of unaligned IOs */
struct semaphore cmd_slot_unal;
@@ -485,6 +467,8 @@ struct driver_data {
struct request_queue *queue; /* Our request queue. */
+ struct blk_mq_tag_set tags; /* blk_mq tags */
+
struct mtip_port *port; /* Pointer to the port data structure. */
unsigned product_type; /* magic value declaring the product type */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 091b9ea14feb..77087a29b127 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -32,6 +32,7 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
struct hrtimer timer;
unsigned int queue_depth;
spinlock_t lock;
@@ -202,8 +203,8 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
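+ /*
+ * end_cmd() may free the storage backing this llist node, so
+ * advance the iterator before completing the command.
+ */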
- end_cmd(cmd);
entry = entry->next;
+ end_cmd(cmd);
} while (entry);
}
@@ -226,7 +227,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- end_cmd(rq->special);
+ end_cmd(blk_mq_rq_to_pdu(rq));
}
static inline void null_handle_cmd(struct nullb_cmd *cmd)
@@ -311,7 +312,7 @@ static void null_request_fn(struct request_queue *q)
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
- struct nullb_cmd *cmd = rq->special;
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->rq = rq;
cmd->nq = hctx->driver_data;
@@ -320,46 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
return BLK_MQ_RQ_QUEUE_OK;
}
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
-{
- int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
- int tip = (reg->nr_hw_queues % nr_online_nodes);
- int node = 0, i, n;
-
- /*
- * Split submit queues evenly wrt to the number of nodes. If uneven,
- * fill the first buckets with one extra, until the rest is filled with
- * no extra.
- */
- for (i = 0, n = 1; i < hctx_index; i++, n++) {
- if (n % b_size == 0) {
- n = 0;
- node++;
-
- tip--;
- if (!tip)
- b_size = reg->nr_hw_queues / nr_online_nodes;
- }
- }
-
- /*
- * A node might not be online, therefore map the relative node id to the
- * real node id.
- */
- for_each_online_node(n) {
- if (!node)
- break;
- node--;
- }
-
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
- kfree(hctx);
-}
-
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
BUG_ON(!nullb);
@@ -389,19 +350,14 @@ static struct blk_mq_ops null_mq_ops = {
.complete = null_softirq_done_fn,
};
-static struct blk_mq_reg null_mq_reg = {
- .ops = &null_mq_ops,
- .queue_depth = 64,
- .cmd_size = sizeof(struct nullb_cmd),
- .flags = BLK_MQ_F_SHOULD_MERGE,
-};
-
static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);
del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q);
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_tag_set(&nullb->tag_set);
put_disk(nullb->disk);
kfree(nullb);
}
@@ -506,7 +462,7 @@ static int null_add_dev(void)
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
if (!nullb)
- return -ENOMEM;
+ goto out;
spin_lock_init(&nullb->lock);
@@ -514,49 +470,44 @@ static int null_add_dev(void)
submit_queues = nr_online_nodes;
if (setup_queues(nullb))
- goto err;
+ goto out_free_nullb;
if (queue_mode == NULL_Q_MQ) {
- null_mq_reg.numa_node = home_node;
- null_mq_reg.queue_depth = hw_queue_depth;
- null_mq_reg.nr_hw_queues = submit_queues;
-
- if (use_per_node_hctx) {
- null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
- null_mq_reg.ops->free_hctx = null_free_hctx;
- } else {
- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
- }
-
- nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+ nullb->tag_set.ops = &null_mq_ops;
+ nullb->tag_set.nr_hw_queues = submit_queues;
+ nullb->tag_set.queue_depth = hw_queue_depth;
+ nullb->tag_set.numa_node = home_node;
+ nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
+ nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ nullb->tag_set.driver_data = nullb;
+
+ if (blk_mq_alloc_tag_set(&nullb->tag_set))
+ goto out_cleanup_queues;
+
+ nullb->q = blk_mq_init_queue(&nullb->tag_set);
+ if (IS_ERR(nullb->q))
+ goto out_cleanup_tags;
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ if (!nullb->q)
+ goto out_cleanup_queues;
blk_queue_make_request(nullb->q, null_queue_bio);
init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ if (!nullb->q)
+ goto out_cleanup_queues;
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
- if (nullb->q)
- blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
init_driver_queues(nullb);
}
- if (!nullb->q)
- goto queue_fail;
-
nullb->q->queuedata = nullb;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk) {
-queue_fail:
- blk_cleanup_queue(nullb->q);
- cleanup_queues(nullb);
-err:
- kfree(nullb);
- return -ENOMEM;
- }
+ if (!disk)
+ goto out_cleanup_blk_queue;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -579,6 +530,18 @@ err:
sprintf(disk->disk_name, "nullb%d", nullb->index);
add_disk(disk);
return 0;
+
+out_cleanup_blk_queue:
+ blk_cleanup_queue(nullb->q);
+out_cleanup_tags:
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_tag_set(&nullb->tag_set);
+out_cleanup_queues:
+ cleanup_queues(nullb);
+out_free_nullb:
+ kfree(nullb);
+out:
+ return -ENOMEM;
}
static int __init null_init(void)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 7c64fa756cce..a842c71dcc21 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2775,6 +2775,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
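+/*
+ * Called by the PCI core around a device reset: prepare == true right
+ * before the reset (quiesce the controller), prepare == false once the
+ * reset has completed (bring it back up).
+ */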
+static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ if (prepare)
+ nvme_dev_shutdown(dev);
+ else
+ nvme_dev_resume(dev);
+}
+
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2839,6 +2849,7 @@ static const struct pci_error_handlers nvme_err_handler = {
.link_reset = nvme_link_reset,
.slot_reset = nvme_slot_reset,
.resume = nvme_error_resume,
+ .reset_notify = nvme_reset_notify,
};
/* Move to pci_ids.h later */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e76bdc074dbe..719cb1bc1640 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q)
pcd_current = cd;
pcd_sector = blk_rq_pos(pcd_req);
pcd_count = blk_rq_cur_sectors(pcd_req);
- pcd_buf = pcd_req->buffer;
+ pcd_buf = bio_data(pcd_req->bio);
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 19ad8f0c83ef..fea7e76a00de 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -454,7 +454,7 @@ static enum action do_pd_io_start(void)
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
pd_run = blk_rq_sectors(pd_req);
- pd_buf = pd_req->buffer;
+ pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
if (pd_cmd == READ)
return do_pd_read_start();
@@ -485,7 +485,7 @@ static int pd_next_buf(void)
spin_lock_irqsave(&pd_lock, saved_flags);
__blk_end_request_cur(pd_req, 0);
pd_count = blk_rq_cur_sectors(pd_req);
- pd_buf = pd_req->buffer;
+ pd_buf = bio_data(pd_req->bio);
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f5c86d523ba0..9a15fd3c9349 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -795,7 +795,7 @@ repeat:
}
pf_cmd = rq_data_dir(pf_req);
- pf_buf = pf_req->buffer;
+ pf_buf = bio_data(pf_req->bio);
pf_retries = 0;
pf_busy = 1;
@@ -827,7 +827,7 @@ static int pf_next_buf(void)
if (!pf_req)
return 1;
pf_count = blk_rq_cur_sectors(pf_req);
- pf_buf = pf_req->buffer;
+ pf_buf = bio_data(pf_req->bio);
}
return 0;
}
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a69dd93d1bd5..608532d3f8c9 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
req = skreq->req;
blk_add_request_payload(req, page, len);
- req->buffer = buf;
}
static void skd_request_fn_not_online(struct request_queue *q);
@@ -744,6 +743,7 @@ static void skd_request_fn(struct request_queue *q)
break;
}
skreq->discard_page = 1;
+ req->completion_data = page;
skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
@@ -858,8 +858,7 @@ static void skd_end_request(struct skd_device *skdev,
(skreq->discard_page == 1)) {
pr_debug("%s:%s:%d, free the page!",
skdev->name, __func__, __LINE__);
- free_page((unsigned long)req->buffer);
- req->buffer = NULL;
+ __free_page(req->completion_data);
}
if (unlikely(error)) {
@@ -3945,15 +3944,14 @@ static int skd_acquire_msix(struct skd_device *skdev)
for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
entries[i].entry = i;
- rc = pci_enable_msix_range(pdev, entries,
- SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT);
- if (rc < 0) {
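+ /* pci_enable_msix_exact() is all-or-nothing: it either grants
+ * exactly SKD_MAX_MSIX_COUNT vectors or fails outright, so the
+ * vector count no longer varies at runtime. */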
+ rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
+ if (rc) {
pr_err("(%s): failed to enable MSI-X %d\n",
skd_name(skdev), rc);
goto msix_out;
}
- skdev->msix_count = rc;
+ skdev->msix_count = SKD_MAX_MSIX_COUNT;
skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
skdev->msix_count, GFP_KERNEL);
if (!skdev->msix_entries) {
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index b02d53a399f3..6b44bbe528b7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q)
case READ:
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
- req->buffer);
+ bio_data(req->bio));
break;
}
done:
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c74f7b56e7c4..523ee8fd4c15 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs)
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req),
- req->buffer);
+ bio_data(req->bio));
swim3_dbg(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
#endif
@@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs)
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..f63d358f3d93 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -30,6 +30,9 @@ struct virtio_blk
/* The disk structure for the kernel. */
struct gendisk *disk;
+ /* Block layer tags. */
+ struct blk_mq_tag_set tag_set;
+
/* Process context for config space updates */
struct work_struct config_work;
@@ -112,7 +115,7 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
- struct virtblk_req *vbr = req->special;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -144,21 +147,22 @@ static void virtblk_done(struct virtqueue *vq)
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
- spin_unlock_irqrestore(&vblk->vq_lock, flags);
/* In case queue is stopped waiting for more buffers. */
if (req_done)
- blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
}
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtblk_req *vbr = req->special;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
const bool last = (req->cmd_flags & REQ_END) != 0;
int err;
+ bool notify = false;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
@@ -202,8 +206,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vq);
- spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
@@ -211,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
return BLK_MQ_RQ_QUEUE_ERROR;
}
- if (last)
- virtqueue_kick(vblk->vq);
-
+ if (last && virtqueue_kick_prepare(vblk->vq))
+ notify = true;
spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
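+ /*
+ * Notify the host outside the lock: virtqueue_kick_prepare() under
+ * the lock only records that a kick is needed, keeping the
+ * potentially expensive exit to the host out of the critical
+ * section.
+ */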
+ if (notify)
+ virtqueue_notify(vblk->vq);
return BLK_MQ_RQ_QUEUE_OK;
}
@@ -480,33 +486,27 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
-static struct blk_mq_ops virtio_mq_ops = {
- .queue_rq = virtio_queue_rq,
- .map_queue = blk_mq_map_queue,
- .alloc_hctx = blk_mq_alloc_single_hw_queue,
- .free_hctx = blk_mq_free_single_hw_queue,
- .complete = virtblk_request_done,
-};
-
-static struct blk_mq_reg virtio_mq_reg = {
- .ops = &virtio_mq_ops,
- .nr_hw_queues = 1,
- .queue_depth = 0, /* Set in virtblk_probe */
- .numa_node = NUMA_NO_NODE,
- .flags = BLK_MQ_F_SHOULD_MERGE,
-};
-module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
-
-static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
- struct request *rq, unsigned int nr)
+static int virtblk_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
{
struct virtio_blk *vblk = data;
- struct virtblk_req *vbr = rq->special;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
+static struct blk_mq_ops virtio_mq_ops = {
+ .queue_rq = virtio_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .complete = virtblk_request_done,
+ .init_request = virtblk_init_request,
+};
+
+static unsigned int virtblk_queue_depth;
+module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
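+/* virtblk_queue_depth of 0 (the default) means "fill the virtqueue
+ * ring"; see the sizing logic in virtblk_probe() below. */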
+
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -561,24 +561,34 @@ static int virtblk_probe(struct virtio_device *vdev)
}
/* Default queue sizing is to fill the ring. */
- if (!virtio_mq_reg.queue_depth) {
- virtio_mq_reg.queue_depth = vblk->vq->num_free;
+ if (!virtblk_queue_depth) {
+ virtblk_queue_depth = vblk->vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- virtio_mq_reg.queue_depth /= 2;
+ virtblk_queue_depth /= 2;
}
- virtio_mq_reg.cmd_size =
+
+ memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+ vblk->tag_set.ops = &virtio_mq_ops;
+ vblk->tag_set.nr_hw_queues = 1;
+ vblk->tag_set.queue_depth = virtblk_queue_depth;
+ vblk->tag_set.numa_node = NUMA_NO_NODE;
+ vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
+ vblk->tag_set.driver_data = vblk;
+
+ err = blk_mq_alloc_tag_set(&vblk->tag_set);
+ if (err)
+ goto out_put_disk;
- q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
+ q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
if (!q) {
err = -ENOMEM;
- goto out_put_disk;
+ goto out_free_tags;
}
- blk_mq_init_commands(q, virtblk_init_vbr, vblk);
-
q->queuedata = vblk;
virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -679,6 +689,8 @@ static int virtblk_probe(struct virtio_device *vdev)
out_del_disk:
del_gendisk(vblk->disk);
blk_cleanup_queue(vblk->disk->queue);
+out_free_tags:
+ blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
put_disk(vblk->disk);
out_free_vq:
@@ -705,6 +717,8 @@ static void virtblk_remove(struct virtio_device *vdev)
del_gendisk(vblk->disk);
blk_cleanup_queue(vblk->disk->queue);
+ blk_mq_free_tag_set(&vblk->tag_set);
+
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
@@ -749,7 +763,7 @@ static int virtblk_restore(struct virtio_device *vdev)
vblk->config_enable = true;
ret = init_vq(vdev->priv);
if (!ret)
- blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
return ret;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index be052773ad03..f65b807e3236 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -314,7 +314,7 @@ struct xen_blkif {
unsigned long long st_rd_sect;
unsigned long long st_wr_sect;
- wait_queue_head_t waiting_to_free;
+ struct work_struct free_work;
/* Thread shutdown wait queue. */
wait_queue_head_t shutdown_wq;
};
@@ -361,7 +361,7 @@ struct pending_req {
#define xen_blkif_put(_b) \
do { \
if (atomic_dec_and_test(&(_b)->refcnt)) \
- wake_up(&(_b)->waiting_to_free);\
+ schedule_work(&(_b)->free_work);\
} while (0)
struct phys_req {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 9a547e6b6ebf..3a8b810b4980 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -35,12 +35,26 @@ static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
unsigned int);
+static void xen_blkif_free(struct xen_blkif *blkif);
+static void xen_vbd_free(struct xen_vbd *vbd);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
return be->dev;
}
+/*
+ * The last request could free the device from softirq context and
+ * xen_blkif_free() can sleep.
+ */
+static void xen_blkif_deferred_free(struct work_struct *work)
+{
+ struct xen_blkif *blkif;
+
+ blkif = container_of(work, struct xen_blkif, free_work);
+ xen_blkif_free(blkif);
+}
+
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
char *devpath, *devname;
@@ -121,7 +135,6 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
init_completion(&blkif->drain_complete);
atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
- init_waitqueue_head(&blkif->waiting_to_free);
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
@@ -132,6 +145,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
+ INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
for (i = 0; i < XEN_BLKIF_REQS; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -231,7 +245,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
return 0;
}
-static void xen_blkif_disconnect(struct xen_blkif *blkif)
+static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
@@ -239,9 +253,12 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
blkif->xenblkd = NULL;
}
- atomic_dec(&blkif->refcnt);
- wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
- atomic_inc(&blkif->refcnt);
+ /* The above kthread_stop() guarantees that at this point we
+ * don't have any discard_io or other_io requests. So, checking
+ * for inflight IO is enough.
+ */
+ if (atomic_read(&blkif->inflight) > 0)
+ return -EBUSY;
if (blkif->irq) {
unbind_from_irqhandler(blkif->irq, blkif);
@@ -252,6 +269,8 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
blkif->blk_rings.common.sring = NULL;
}
+
+ return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
@@ -259,8 +278,8 @@ static void xen_blkif_free(struct xen_blkif *blkif)
struct pending_req *req, *n;
int i = 0, j;
- if (!atomic_dec_and_test(&blkif->refcnt))
- BUG();
+ xen_blkif_disconnect(blkif);
+ xen_vbd_free(&blkif->vbd);
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(blkif);
@@ -449,16 +468,15 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
be->backend_watch.node = NULL;
}
+ dev_set_drvdata(&dev->dev, NULL);
+
if (be->blkif) {
xen_blkif_disconnect(be->blkif);
- xen_vbd_free(&be->blkif->vbd);
- xen_blkif_free(be->blkif);
- be->blkif = NULL;
+ xen_blkif_put(be->blkif);
}
kfree(be->mode);
kfree(be);
- dev_set_drvdata(&dev->dev, NULL);
return 0;
}
@@ -481,10 +499,15 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
int err;
- int state = 0;
+ int state = 0, discard_enable;
struct block_device *bdev = be->blkif->vbd.bdev;
struct request_queue *q = bdev_get_queue(bdev);
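+ /*
+ * An explicit discard-enable = 0 in this backend's xenstore node
+ * vetoes discard even when the underlying queue supports it; if
+ * the key is absent (err != 1), discard stays enabled.
+ */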
+ err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
+ &discard_enable);
+ if (err == 1 && !discard_enable)
+ return;
+
if (blk_queue_discard(q)) {
err = xenbus_printf(xbt, dev->nodename,
"discard-granularity", "%u",
@@ -700,7 +723,11 @@ static void frontend_changed(struct xenbus_device *dev,
* Enforce precondition before potential leak point.
* xen_blkif_disconnect() is idempotent.
*/
- xen_blkif_disconnect(be->blkif);
+ err = xen_blkif_disconnect(be->blkif);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "pending I/O");
+ break;
+ }
err = connect_ring(be);
if (err)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index efe1b4761735..5deb235bd18f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq)
}
pr_debug("do_blk_req %p: cmd %p, sec %lx, "
- "(%u/%u) buffer:%p [%s]\n",
+ "(%u/%u) [%s]\n",
req, req->cmd, (unsigned long)blk_rq_pos(req),
blk_rq_cur_sectors(req), blk_rq_sectors(req),
- req->buffer, rq_data_dir(req) ? "write" : "read");
+ rq_data_dir(req) ? "write" : "read");
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
@@ -1635,36 +1635,24 @@ blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
int err;
- char *type;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int discard_secure;
- type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
- if (IS_ERR(type))
- return;
-
- info->feature_secdiscard = 0;
- if (strncmp(type, "phy", 3) == 0) {
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->feature_discard = 1;
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%d", &discard_secure,
- NULL);
- if (!err)
- info->feature_secdiscard = discard_secure;
-
- } else if (strncmp(type, "file", 4) == 0)
- info->feature_discard = 1;
-
- kfree(type);
+ info->feature_discard = 1;
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "discard-granularity", "%u", &discard_granularity,
+ "discard-alignment", "%u", &discard_alignment,
+ NULL);
+ if (!err) {
+ info->discard_granularity = discard_granularity;
+ info->discard_alignment = discard_alignment;
+ }
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "discard-secure", "%d", &discard_secure,
+ NULL);
+ if (!err)
+ info->feature_secdiscard = !!discard_secure;
}
static int blkfront_setup_indirect(struct blkfront_info *info)
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1393b8871a28..ab3ea62e5dfc 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
rq_data_dir(req));
ace->req = req;
- ace->data_ptr = req->buffer;
+ ace->data_ptr = bio_data(req->bio);
ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
@@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
*/
- ace->data_ptr = ace->req->buffer;
+ ace->data_ptr = bio_data(ace->req->bio);
ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 27de5046708a..968f9e52effa 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q)
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
unsigned long size = Z2RAM_CHUNKSIZE - addr;
+ void *buffer = bio_data(req->bio);
+
if (len < size)
size = len;
addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
if (rq_data_dir(req) == READ)
- memcpy(req->buffer, (char *)addr, size);
+ memcpy(buffer, (char *)addr, size);
else
- memcpy((char *)addr, req->buffer, size);
+ memcpy((char *)addr, buffer, size);
start += size;
len -= size;
}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 552373c4e362..a118ec1650fa 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -4,6 +4,14 @@
menu "Bus devices"
+config BRCMSTB_GISB_ARB
+ bool "Broadcom STB GISB bus arbiter"
+ depends on ARM
+ help
+ Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+ arbiter. This driver provides timeout and target abort error handling
+ and internal bus master decoding.
+
config IMX_WEIM
bool "Freescale EIM DRIVER"
depends on ARCH_MXC
@@ -41,4 +49,14 @@ config ARM_CCI
help
Driver supporting the CCI cache coherent interconnect for ARM
platforms.
+
+config VEXPRESS_CONFIG
+ bool "Versatile Express configuration bus"
+ default y if ARCH_VEXPRESS
+ depends on ARM || ARM64
+ depends on OF
+ select REGMAP
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 8947bdd0de8b..6a4ea7e4af1a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -2,6 +2,7 @@
# Makefile for the bus drivers.
#
+obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
@@ -10,3 +11,5 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
# CCI cache coherent interconnect for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
+
+obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 000000000000..6159b7752a64
--- /dev/null
+++ b/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+
+#include <asm/bug.h>
+#include <asm/signal.h>
+
+#define ARB_TIMER 0x008
+#define ARB_ERR_CAP_CLR 0x7e4
+#define ARB_ERR_CAP_CLEAR (1 << 0)
+#define ARB_ERR_CAP_HI_ADDR 0x7e8
+#define ARB_ERR_CAP_ADDR 0x7ec
+#define ARB_ERR_CAP_DATA 0x7f0
+#define ARB_ERR_CAP_STATUS 0x7f4
+#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
+#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
+#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
+#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
+#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
+#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+#define ARB_ERR_CAP_MASTER 0x7f8
+
+struct brcmstb_gisb_arb_device {
+ void __iomem *base;
+ struct mutex lock;
+ struct list_head next;
+ u32 valid_mask;
+ const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ u32 timeout;
+
+ mutex_lock(&gdev->lock);
+ timeout = ioread32(gdev->base + ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return sprintf(buf, "%u\n", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == 0 || val >= 0xffffffff)
+ return -EINVAL;
+
+ mutex_lock(&gdev->lock);
+ iowrite32(val, gdev->base + ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+ u32 masters)
+{
+ u32 mask = gdev->valid_mask & masters;
+
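+ /*
+ * Only decode when exactly one known master bit is set; the caller
+ * falls back to printing the raw master value otherwise.
+ */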
+ if (hweight_long(mask) != 1)
+ return NULL;
+
+ return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ const char *reason)
+{
+ u32 cap_status;
+ u64 arb_addr;
+ u32 master;
+ const char *m_name;
+ char m_fmt[11];
+
+ cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+ return 1;
+
+ /* Read the address and master */
+ arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff;
+#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32;
+#endif
+ master = ioread32(gdev->base + ARB_ERR_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
+ __func__, reason, arb_addr,
+ cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+ m_name);
+
+ /* clear the GISB error */
+ iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR);
+
+ return 0;
+}
+
+static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+
+ /* iterate over each GISB arb registered handlers */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ /*
+ * If it was an imprecise abort, then we need to correct the
+ * return address to be _after_ the instruction.
+ */
+ if (fsr & (1 << 10))
+ regs->ARM_pc += 4;
+
+ return ret;
+}
+
+void __init brcmstb_hook_fault_code(void)
+{
+ hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
+ "imprecise external abort");
+}
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+ return IRQ_HANDLED;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+ gisb_arb_get_timeout, gisb_arb_set_timeout);
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+ &dev_attr_gisb_arb_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+ .attrs = gisb_arb_sysfs_attrs,
+};
+
+static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct brcmstb_gisb_arb_device *gdev;
+ struct resource *r;
+ int err, timeout_irq, tea_irq;
+ unsigned int num_masters, j = 0;
+ int i, first, last;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ timeout_irq = platform_get_irq(pdev, 0);
+ tea_irq = platform_get_irq(pdev, 1);
+
+ gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ mutex_init(&gdev->lock);
+ INIT_LIST_HEAD(&gdev->next);
+
+ gdev->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!gdev->base)
+ return -ENOMEM;
+
+ err = devm_request_irq(&pdev->dev, timeout_irq,
+ brcmstb_gisb_timeout_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, tea_irq,
+ brcmstb_gisb_tea_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ /* If we do not have a valid mask, assume all masters are enabled */
+ if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+ &gdev->valid_mask))
+ gdev->valid_mask = 0xffffffff;
+
+ /* Proceed with reading the literal names if we agree on the
+ * number of masters.
+ */
+ num_masters = of_property_count_strings(dn,
+ "brcm,gisb-arb-master-names");
+ if (hweight_long(gdev->valid_mask) == num_masters) {
+ first = ffs(gdev->valid_mask) - 1;
+ last = fls(gdev->valid_mask) - 1;
+
+ for (i = first; i <= last; i++) {
+ if (!(gdev->valid_mask & BIT(i)))
+ continue;
+
+ of_property_read_string_index(dn,
+ "brcm,gisb-arb-master-names", j,
+ &gdev->master_names[i]);
+ j++;
+ }
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, gdev);
+
+ list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+ dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
+ gdev->base, timeout_irq, tea_irq);
+
+ return 0;
+}
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+ { .compatible = "brcm,gisb-arb" },
+ { },
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+ .probe = brcmstb_gisb_arb_probe,
+ .driver = {
+ .name = "brcm-gisb-arb",
+ .owner = THIS_MODULE,
+ .of_match_table = brcmstb_gisb_arb_of_match,
+ },
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+ return platform_driver_register(&brcmstb_gisb_arb_driver);
+}
+
+module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 00b73448b22e..26c3779d871d 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -704,7 +704,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
phys_addr_t sdramwins_phys_base,
size_t sdramwins_size)
{
- struct device_node *np;
int win;
mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -717,12 +716,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
return -ENOMEM;
}
- np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
- if (np) {
- mbus->hw_io_coherency = 1;
- of_node_put(np);
- }
-
for (win = 0; win < mbus->soc->num_wins; win++)
mvebu_mbus_disable_window(mbus, win);
@@ -892,7 +885,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
}
}
-int __init mvebu_mbus_dt_init(void)
+int __init mvebu_mbus_dt_init(bool is_coherent)
{
struct resource mbuswins_res, sdramwins_res;
struct device_node *np, *controller;
@@ -930,6 +923,8 @@ int __init mvebu_mbus_dt_init(void)
return -EINVAL;
}
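+ /* Coherency is now the caller's decision (platform code knows
+ * whether the coherency fabric is in use) instead of being sniffed
+ * from the "marvell,coherency-fabric" DT node removed above. */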
+ mbus_state.hw_io_coherency = is_coherent;
+
/* Get optional pcie-{mem,io}-aperture properties */
mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
&mbus_state.pcie_io_aperture);
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index feeecae623f6..531ae591783b 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,43 +1,45 @@
/*
- * OMAP4XXX L3 Interconnect error handling driver
+ * OMAP L3 Interconnect error handling driver
*
- * Copyright (C) 2011 Texas Corporation
+ * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "omap_l3_noc.h"
-/*
- * Interrupt Handler for L3 error detection.
- * 1) Identify the L3 clockdomain partition to which the error belongs to.
- * 2) Identify the slave where the error information is logged
- * 3) Print the logged information.
- * 4) Add dump stack to provide kernel trace.
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3: pointer to l3 struct
+ * @base: base address of clkdm
+ * @flag_mux: flagmux corresponding to the event
+ * @err_src: error source index of the slave (target)
*
- * Two Types of errors :
+ * This does the second part of the error interrupt handling:
+ * 3) Parse in the slave information
+ * 4) Print the logged information.
+ * 5) Add dump stack to provide kernel trace.
+ * 6) Clear the source if known.
+ *
+ * This handles two types of errors:
* 1) Custom errors in L3 :
* Target like DMM/FW/EMIF generates SRESP=ERR error
* 2) Standard L3 error:
@@ -53,214 +55,264 @@
* can be trapped as well. But the trapping is implemented as part
* secure software and hence need not be implemented here.
*/
-static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+ struct l3_flagmux_data *flag_mux, int err_src)
{
+ int k;
+ u32 std_err_main, clear, masterid;
+ u8 op_code, m_req_info;
+ void __iomem *l3_targ_base;
+ void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+ void __iomem *l3_targ_hdr, *l3_targ_info;
+ struct l3_target_data *l3_targ_inst;
+ struct l3_masters_data *master;
+ char *target_name, *master_name = "UNIDENTIFIED";
+ char *err_description;
+ char err_string[30] = { 0 };
+ char info_string[60] = { 0 };
+
+ /* We DO NOT expect err_src to go out of bounds */
+ BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+ if (err_src < flag_mux->num_targ_data) {
+ l3_targ_inst = &flag_mux->l3_targ[err_src];
+ target_name = l3_targ_inst->name;
+ l3_targ_base = base + l3_targ_inst->offset;
+ } else {
+ target_name = L3_TARGET_NOT_SUPPORTED;
+ }
- struct omap4_l3 *l3 = _l3;
- int inttype, i, k;
+ if (target_name == L3_TARGET_NOT_SUPPORTED)
+ return -ENODEV;
+
+ /* Read the stderrlog_main_source from clk domain */
+ l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+ l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+ std_err_main = readl_relaxed(l3_targ_stderr);
+
+ switch (std_err_main & CUSTOM_ERROR) {
+ case STANDARD_ERROR:
+ err_description = "Standard";
+ snprintf(err_string, sizeof(err_string),
+ ": At Address: 0x%08X ",
+ readl_relaxed(l3_targ_slvofslsb));
+
+ l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+ break;
+
+ case CUSTOM_ERROR:
+ err_description = "Custom";
+
+ l3_targ_mstaddr = l3_targ_base +
+ L3_TARG_STDERRLOG_CINFO_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+ break;
+
+ default:
+ /* Nothing to be handled here as of now */
+ return 0;
+ }
+
+ /* STDERRLOG_MSTADDR Stores the NTTP master address. */
+ masterid = (readl_relaxed(l3_targ_mstaddr) &
+ l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
+
+ for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+ k++, master++) {
+ if (masterid == master->id) {
+ master_name = master->name;
+ break;
+ }
+ }
+
+ op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+ m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+ snprintf(info_string, sizeof(info_string),
+ ": %s in %s mode during %s access",
+ (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+ (m_req_info & BIT(1)) ? "Supervisor" : "User",
+ (m_req_info & BIT(3)) ? "Debug" : "Functional");
+
+ WARN(true,
+ "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+ dev_name(l3->dev),
+ err_description,
+ master_name, target_name,
+ l3_transaction_type[op_code],
+ err_string, info_string);
+
+ /* clear the std error log*/
+ clear = std_err_main | CLEAR_STDERR_LOG;
+ writel_relaxed(clear, l3_targ_stderr);
+
+ return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq: irq number
+ * @_l3: pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ * 1) Identify the L3 clockdomain partition to which the error belongs to.
+ * 2) Identify the slave where the error information is logged
+ * ... handle the slave event..
+ * 7) if the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+ struct omap_l3 *l3 = _l3;
+ int inttype, i, ret;
int err_src = 0;
- u32 std_err_main, err_reg, clear, masterid;
- void __iomem *base, *l3_targ_base;
- char *target_name, *master_name = "UN IDENTIFIED";
+ u32 err_reg, mask_val;
+ void __iomem *base, *mask_reg;
+ struct l3_flagmux_data *flag_mux;
/* Get the Type of interrupt */
inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
- for (i = 0; i < L3_MODULES; i++) {
+ for (i = 0; i < l3->num_modules; i++) {
/*
* Read the regerr register of the clock domain
* to determine the source
*/
base = l3->l3_base[i];
- err_reg = __raw_readl(base + l3_flagmux[i] +
- + L3_FLAGMUX_REGERR0 + (inttype << 3));
+ flag_mux = l3->l3_flagmux[i];
+ err_reg = readl_relaxed(base + flag_mux->offset +
+ L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+ err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+ flag_mux->mask_dbg_bits);
/* Get the corresponding error and analyse */
if (err_reg) {
/* Identify the source from control status register */
err_src = __ffs(err_reg);
- /* Read the stderrlog_main_source from clk domain */
- l3_targ_base = base + *(l3_targ[i] + err_src);
- std_err_main = __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- masterid = __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_MSTADDR);
-
- switch (std_err_main & CUSTOM_ERROR) {
- case STANDARD_ERROR:
- target_name =
- l3_targ_inst_name[i][err_src];
- WARN(true, "L3 standard error: TARGET:%s at address 0x%x\n",
- target_name,
- __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_SLVOFSLSB));
- /* clear the std error log*/
- clear = std_err_main | CLEAR_STDERR_LOG;
- writel(clear, l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- break;
-
- case CUSTOM_ERROR:
- target_name =
- l3_targ_inst_name[i][err_src];
- for (k = 0; k < NUM_OF_L3_MASTERS; k++) {
- if (masterid == l3_masters[k].id)
- master_name =
- l3_masters[k].name;
- }
- WARN(true, "L3 custom error: MASTER:%s TARGET:%s\n",
- master_name, target_name);
- /* clear the std error log*/
- clear = std_err_main | CLEAR_STDERR_LOG;
- writel(clear, l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- break;
-
- default:
- /* Nothing to be handled here as of now */
- break;
+ ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+ /*
+ * Certain platforms may have "undocumented" status
+ * pending on boot. So don't generate a severe warning
+ * here. Just mask it off to prevent the error from
+ * recurring and locking up the system.
+ */
+ if (ret) {
+ dev_err(l3->dev,
+ "L3 %s error: target %d mod:%d %s\n",
+ inttype ? "debug" : "application",
+ err_src, i, "(unclearable)");
+
+ mask_reg = base + flag_mux->offset +
+ L3_FLAGMUX_MASK0 + (inttype << 3);
+ mask_val = readl_relaxed(mask_reg);
+ mask_val &= ~(1 << err_src);
+ writel_relaxed(mask_val, mask_reg);
+
+ /* Mark these bits as to be ignored */
+ if (inttype)
+ flag_mux->mask_app_bits |= 1 << err_src;
+ else
+ flag_mux->mask_dbg_bits |= 1 << err_src;
}
- /* Error found so break the for loop */
- break;
+
+ /* Error found so break the for loop */
+ break;
}
}
return IRQ_HANDLED;
}
-static int omap4_l3_probe(struct platform_device *pdev)
+static const struct of_device_id l3_noc_match[] = {
+ {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+ {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+ {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
{
- static struct omap4_l3 *l3;
- struct resource *res;
- int ret;
+ const struct of_device_id *of_id;
+ static struct omap_l3 *l3;
+ int ret, i, res_idx;
+
+ of_id = of_match_device(l3_noc_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
- l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+ l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
if (!l3)
return -ENOMEM;
+ memcpy(l3, of_id->data, sizeof(*l3));
+ l3->dev = &pdev->dev;
platform_set_drvdata(pdev, l3);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 0\n");
- ret = -ENODEV;
- goto err0;
- }
-
- l3->l3_base[0] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[0]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 1\n");
- ret = -ENODEV;
- goto err1;
- }
-
- l3->l3_base[1] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[1]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err1;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 2\n");
- ret = -ENODEV;
- goto err2;
- }
+ /* Get mem resources */
+ for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+ struct resource *res;
- l3->l3_base[2] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[2]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
+ if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+ /* First entry cannot be submodule */
+ BUG_ON(i == 0);
+ l3->l3_base[i] = l3->l3_base[i - 1];
+ continue;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+ l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3->l3_base[i])) {
+ dev_err(l3->dev, "ioremap %d failed\n", i);
+ return PTR_ERR(l3->l3_base[i]);
+ }
+ res_idx++;
}
/*
* Setup interrupt Handlers
*/
l3->debug_irq = platform_get_irq(pdev, 0);
- ret = request_irq(l3->debug_irq,
- l3_interrupt_handler,
- IRQF_DISABLED, "l3-dbg-irq", l3);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+ IRQF_DISABLED, "l3-dbg-irq", l3);
if (ret) {
- pr_crit("L3: request_irq failed to register for 0x%x\n",
- l3->debug_irq);
- goto err3;
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+ return ret;
}
l3->app_irq = platform_get_irq(pdev, 1);
- ret = request_irq(l3->app_irq,
- l3_interrupt_handler,
- IRQF_DISABLED, "l3-app-irq", l3);
- if (ret) {
- pr_crit("L3: request_irq failed to register for 0x%x\n",
- l3->app_irq);
- goto err4;
- }
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+ IRQF_DISABLED, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
- return 0;
-
-err4:
- free_irq(l3->debug_irq, l3);
-err3:
- iounmap(l3->l3_base[2]);
-err2:
- iounmap(l3->l3_base[1]);
-err1:
- iounmap(l3->l3_base[0]);
-err0:
- kfree(l3);
return ret;
}
-static int omap4_l3_remove(struct platform_device *pdev)
-{
- struct omap4_l3 *l3 = platform_get_drvdata(pdev);
-
- free_irq(l3->app_irq, l3);
- free_irq(l3->debug_irq, l3);
- iounmap(l3->l3_base[0]);
- iounmap(l3->l3_base[1]);
- iounmap(l3->l3_base[2]);
- kfree(l3);
-
- return 0;
-}
-
-#if defined(CONFIG_OF)
-static const struct of_device_id l3_noc_match[] = {
- {.compatible = "ti,omap4-l3-noc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, l3_noc_match);
-#else
-#define l3_noc_match NULL
-#endif
-
-static struct platform_driver omap4_l3_driver = {
- .probe = omap4_l3_probe,
- .remove = omap4_l3_remove,
+static struct platform_driver omap_l3_driver = {
+ .probe = omap_l3_probe,
.driver = {
.name = "omap_l3_noc",
.owner = THIS_MODULE,
- .of_match_table = l3_noc_match,
+ .of_match_table = of_match_ptr(l3_noc_match),
},
};
-static int __init omap4_l3_init(void)
+static int __init omap_l3_init(void)
{
- return platform_driver_register(&omap4_l3_driver);
+ return platform_driver_register(&omap_l3_driver);
}
-postcore_initcall_sync(omap4_l3_init);
+postcore_initcall_sync(omap_l3_init);
-static void __exit omap4_l3_exit(void)
+static void __exit omap_l3_exit(void)
{
- platform_driver_unregister(&omap4_l3_driver);
+ platform_driver_unregister(&omap_l3_driver);
}
-module_exit(omap4_l3_exit);
+module_exit(omap_l3_exit);
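
For reference, a minimal sketch (not part of the patch; the foo_* names are
hypothetical) of the devm_* pattern this conversion relies on: every
allocation, mapping, and IRQ is bound to the struct device, which is why the
err0..err4 unwind labels and the remove() callback above could be dropped.

    #include <linux/err.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo { void __iomem *base; };

    static irqreturn_t foo_irq(int irq, void *data)
    {
    	return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
    	struct foo *priv;
    	struct resource *res;
    	int irq;

    	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
    	if (!priv)
    		return -ENOMEM;			/* nothing to unwind */

    	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    	priv->base = devm_ioremap_resource(&pdev->dev, res);
    	if (IS_ERR(priv->base))
    		return PTR_ERR(priv->base);	/* mapping freed by the core */

    	irq = platform_get_irq(pdev, 0);
    	/* IRQ is released automatically if probe fails or the device detaches */
    	return devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", priv);
    }
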
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index a6ce34dc4814..551e01061434 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,29 +1,25 @@
/*
- * OMAP4XXX L3 Interconnect error handling driver header
+ * OMAP L3 Interconnect error handling driver header
*
- * Copyright (C) 2011 Texas Corporation
+ * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
-#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
-#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES 3
+#define MAX_CLKDM_TARGETS 31
-#define L3_MODULES 3
#define CLEAR_STDERR_LOG (1 << 31)
#define CUSTOM_ERROR 0x2
#define STANDARD_ERROR 0x0
@@ -33,63 +29,165 @@
/* L3 TARG register offsets */
#define L3_TARG_STDERRLOG_MAIN 0x48
+#define L3_TARG_STDERRLOG_HDR 0x4c
+#define L3_TARG_STDERRLOG_MSTADDR 0x50
+#define L3_TARG_STDERRLOG_INFO 0x58
#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
-#define L3_TARG_STDERRLOG_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
#define L3_FLAGMUX_REGERR0 0xc
+#define L3_FLAGMUX_MASK0 0x8
+
+#define L3_TARGET_NOT_SUPPORTED NULL
+
+#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+ /* 0 0 0 */ "Idle",
+ /* 0 0 1 */ "Write",
+ /* 0 1 0 */ "Read",
+ /* 0 1 1 */ "ReadEx",
+ /* 1 0 0 */ "Read Link",
+ /* 1 0 1 */ "Write Non-Posted",
+ /* 1 1 0 */ "Write Conditional",
+ /* 1 1 1 */ "Write Broadcast",
+};
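
The table above maps a 3-bit transaction opcode (per the bit comments) to a
printable name. A one-line decode, assuming the opcode has already been read
out of the error-log registers (the helper name is hypothetical):

    static inline const char *l3_decode_op(u32 opcode)
    {
    	/* only the low 3 bits select the transaction type */
    	return l3_transaction_type[opcode & 0x7];
    }
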
-#define NUM_OF_L3_MASTERS (sizeof(l3_masters)/sizeof(l3_masters[0]))
-
-static u32 l3_flagmux[L3_MODULES] = {
- 0x500,
- 0x1000,
- 0X0200
-};
-
-/* L3 Target standard Error register offsets */
-static u32 l3_targ_inst_clk1[] = {
- 0x100, /* DMM1 */
- 0x200, /* DMM2 */
- 0x300, /* ABE */
- 0x400, /* L4CFG */
- 0x600, /* CLK2 PWR DISC */
- 0x0, /* Host CLK1 */
- 0x900 /* L4 Wakeup */
-};
-
-static u32 l3_targ_inst_clk2[] = {
- 0x500, /* CORTEX M3 */
- 0x300, /* DSS */
- 0x100, /* GPMC */
- 0x400, /* ISS */
- 0x700, /* IVAHD */
- 0xD00, /* missing in TRM corresponds to AES1*/
- 0x900, /* L4 PER0*/
- 0x200, /* OCMRAM */
- 0x100, /* missing in TRM corresponds to GPMC sERROR*/
- 0x600, /* SGX */
- 0x800, /* SL2 */
- 0x1600, /* C2C */
- 0x1100, /* missing in TRM corresponds PWR DISC CLK1*/
- 0xF00, /* missing in TRM corrsponds to SHA1*/
- 0xE00, /* missing in TRM corresponds to AES2*/
- 0xC00, /* L4 PER3 */
- 0xA00, /* L4 PER1*/
- 0xB00, /* L4 PER2*/
- 0x0, /* HOST CLK2 */
- 0x1800, /* CAL */
- 0x1700 /* LLI */
-};
-
-static u32 l3_targ_inst_clk3[] = {
- 0x0100 /* EMUSS */,
- 0x0300, /* DEBUGSS_CT_TBR */
- 0x0 /* HOST CLK3 */
-};
-
-static struct l3_masters_data {
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id: ID of the L3 Master
+ * @name: master name
+ */
+struct l3_masters_data {
u32 id;
- char name[10];
-} l3_masters[] = {
+ char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset: Offset from base for L3 Target
+ * @name: Target name
+ *
+ * Target information is organized as an array indexed by bit field definitions.
+ */
+struct l3_target_data {
+ u32 offset;
+ char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset: offset from base for flagmux register
+ * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
+ * target data. unsupported ones are marked with
+ * L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+ u32 offset;
+ struct l3_target_data *l3_targ;
+ u8 num_targ_data;
+ u32 mask_app_bits;
+ u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev: device representing the bus (populated runtime)
+ * @l3_base: base addresses of modules (populated runtime if 0)
+ * if set to L3_BASE_IS_SUBMODULE, the previous
+ * module's base address is used
+ * @l3_flagmux: array containing flag mux data per module,
+ * with offsets relative to the corresponding
+ * module base.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters: array pointing to master data containing name and register
+ * offset for the master.
+ * @num_masters: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq: irq number of the debug interrupt (populated runtime)
+ * @app_irq: irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+ struct device *dev;
+
+ void __iomem *l3_base[MAX_L3_MODULES];
+ struct l3_flagmux_data **l3_flagmux;
+ int num_modules;
+
+ struct l3_masters_data *l3_masters;
+ int num_masters;
+ u32 mst_addr_mask;
+
+ int debug_irq;
+ int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+ {0x100, "DMM1",},
+ {0x200, "DMM2",},
+ {0x300, "ABE",},
+ {0x400, "L4CFG",},
+ {0x600, "CLK2PWRDISC",},
+ {0x0, "HOSTCLK1",},
+ {0x900, "L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+ .offset = 0x500,
+ .l3_targ = omap_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+ {0x500, "CORTEXM3",},
+ {0x300, "DSS",},
+ {0x100, "GPMC",},
+ {0x400, "ISS",},
+ {0x700, "IVAHD",},
+ {0xD00, "AES1",},
+ {0x900, "L4PER0",},
+ {0x200, "OCMRAM",},
+ {0x100, "GPMCsERROR",},
+ {0x600, "SGX",},
+ {0x800, "SL2",},
+ {0x1600, "C2C",},
+ {0x1100, "PWRDISCCLK1",},
+ {0xF00, "SHA1",},
+ {0xE00, "AES2",},
+ {0xC00, "L4PER3",},
+ {0xA00, "L4PER1",},
+ {0xB00, "L4PER2",},
+ {0x0, "HOSTCLK2",},
+ {0x1800, "CAL",},
+ {0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+ .offset = 0x1000,
+ .l3_targ = omap_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk3[] = {
+ {0x0100, "EMUSS",},
+ {0x0300, "DEBUG SOURCE",},
+ {0x0, "HOST CLK3",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
{ 0x0 , "MPU"},
{ 0x10, "CS_ADP"},
{ 0x14, "xxx"},
@@ -117,60 +215,261 @@ static struct l3_masters_data {
{ 0xC8, "USBHOSTFS"}
};
-static char *l3_targ_inst_name[L3_MODULES][21] = {
- {
- "DMM1",
- "DMM2",
- "ABE",
- "L4CFG",
- "CLK2 PWR DISC",
- "HOST CLK1",
- "L4 WAKEUP"
- },
- {
- "CORTEX M3" ,
- "DSS ",
- "GPMC ",
- "ISS ",
- "IVAHD ",
- "AES1",
- "L4 PER0",
- "OCMRAM ",
- "GPMC sERROR",
- "SGX ",
- "SL2 ",
- "C2C ",
- "PWR DISC CLK1",
- "SHA1",
- "AES2",
- "L4 PER3",
- "L4 PER1",
- "L4 PER2",
- "HOST CLK2",
- "CAL",
- "LLI"
- },
- {
- "EMUSS",
- "DEBUG SOURCE",
- "HOST CLK3"
- },
-};
-
-static u32 *l3_targ[L3_MODULES] = {
- l3_targ_inst_clk1,
- l3_targ_inst_clk2,
- l3_targ_inst_clk3,
-};
-
-struct omap4_l3 {
- struct device *dev;
- struct clk *ick;
+static struct l3_flagmux_data *omap_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap_l3_data = {
+ .l3_flagmux = omap_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0xFC,
+};
- /* memory base */
- void __iomem *l3_base[L3_MODULES];
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+ {0x2a00, "AES1",},
+ {0x0200, "DMM_P1",},
+ {0x0600, "DSP2_SDMA",},
+ {0x0b00, "EVE2",},
+ {0x1300, "DMM_P2",},
+ {0x2c00, "AES2",},
+ {0x0300, "DSP1_SDMA",},
+ {0x0a00, "EVE1",},
+ {0x0c00, "EVE3",},
+ {0x0d00, "EVE4",},
+ {0x2900, "DSS",},
+ {0x0100, "GPMC",},
+ {0x3700, "PCIE1",},
+ {0x1600, "IVA_CONFIG",},
+ {0x1800, "IVA_SL2IF",},
+ {0x0500, "L4_CFG",},
+ {0x1d00, "L4_WKUP",},
+ {0x3800, "PCIE2",},
+ {0x3300, "SHA2_1",},
+ {0x1200, "GPU",},
+ {0x1000, "IPU1",},
+ {0x1100, "IPU2",},
+ {0x2000, "TPCC_EDMA",},
+ {0x2e00, "TPTC1_EDMA",},
+ {0x2b00, "TPTC2_EDMA",},
+ {0x0700, "VCP1",},
+ {0x2500, "L4_PER2_P3",},
+ {0x0e00, "L4_PER3_P3",},
+ {0x2200, "MMU1",},
+ {0x1400, "PRUSS1",},
+ {0x1500, "PRUSS2"},
+ {0x0800, "VCP1",},
+};
- int debug_irq;
- int app_irq;
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+ .offset = 0x803500,
+ .l3_targ = dra_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+ {0x0, "HOST CLK1",},
+ {0x0, "HOST CLK2",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x3400, "SHA2_2",},
+ {0x0900, "BB2D",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x2100, "L4_PER1_P3",},
+ {0x1c00, "L4_PER1_P1",},
+ {0x1f00, "L4_PER1_P2",},
+ {0x2300, "L4_PER2_P1",},
+ {0x2400, "L4_PER2_P2",},
+ {0x2600, "L4_PER3_P1",},
+ {0x2700, "L4_PER3_P2",},
+ {0x2f00, "MCASP1",},
+ {0x3000, "MCASP2",},
+ {0x3100, "MCASP3",},
+ {0x2800, "MMU2",},
+ {0x0f00, "OCMC_RAM1",},
+ {0x1700, "OCMC_RAM2",},
+ {0x1900, "OCMC_RAM3",},
+ {0x1e00, "OCMC_ROM",},
+ {0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+ .offset = 0x803600,
+ .l3_targ = dra_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+ {0x0100, "L3_INSTR"},
+ {0x0300, "DEBUGSS_CT_TBR"},
+ {0x0, "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+ .offset = 0x200,
+ .l3_targ = dra_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+ { 0x0, "MPU" },
+ { 0x4, "CS_DAP" },
+ { 0x5, "IEEE1500_2_OCP" },
+ { 0x8, "DSP1_MDMA" },
+ { 0x9, "DSP1_CFG" },
+ { 0xA, "DSP1_DMA" },
+ { 0xB, "DSP2_MDMA" },
+ { 0xC, "DSP2_CFG" },
+ { 0xD, "DSP2_DMA" },
+ { 0xE, "IVA" },
+ { 0x10, "EVE1_P1" },
+ { 0x11, "EVE2_P1" },
+ { 0x12, "EVE3_P1" },
+ { 0x13, "EVE4_P1" },
+ { 0x14, "PRUSS1 PRU1" },
+ { 0x15, "PRUSS1 PRU2" },
+ { 0x16, "PRUSS2 PRU1" },
+ { 0x17, "PRUSS2 PRU2" },
+ { 0x18, "IPU1" },
+ { 0x19, "IPU2" },
+ { 0x1A, "SDMA" },
+ { 0x1B, "CDMA" },
+ { 0x1C, "TC1_EDMA" },
+ { 0x1D, "TC2_EDMA" },
+ { 0x20, "DSS" },
+ { 0x21, "MMU1" },
+ { 0x22, "PCIE1" },
+ { 0x23, "MMU2" },
+ { 0x24, "VIP1" },
+ { 0x25, "VIP2" },
+ { 0x26, "VIP3" },
+ { 0x27, "VPE" },
+ { 0x28, "GPU_P1" },
+ { 0x29, "BB2D" },
+ { 0x29, "GPU_P2" },
+ { 0x2B, "GMAC_SW" },
+ { 0x2C, "USB3" },
+ { 0x2D, "USB2_SS" },
+ { 0x2E, "USB2_ULPI_SS1" },
+ { 0x2F, "USB2_ULPI_SS2" },
+ { 0x30, "CSI2_1" },
+ { 0x31, "CSI2_2" },
+ { 0x33, "SATA" },
+ { 0x34, "EVE1_P2" },
+ { 0x35, "EVE2_P2" },
+ { 0x36, "EVE3_P2" },
+ { 0x37, "EVE4_P2" }
};
-#endif
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+ &dra_l3_flagmux_clk1,
+ &dra_l3_flagmux_clk2,
+ &dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+ .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+ .l3_flagmux = dra_l3_flagmux,
+ .num_modules = ARRAY_SIZE(dra_l3_flagmux),
+ .l3_masters = dra_l3_masters,
+ .num_masters = ARRAY_SIZE(dra_l3_masters),
+ /* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+ {0xf00, "EMIF",},
+ {0x1200, "DES",},
+ {0x400, "OCMCRAM",},
+ {0x700, "TPTC0",},
+ {0x800, "TPTC1",},
+ {0x900, "TPTC2"},
+ {0xb00, "TPCC",},
+ {0xd00, "DEBUGSS",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x200, "SHA",},
+ {0xc00, "SGX530",},
+ {0x500, "AES0",},
+ {0xa00, "L4_FAST",},
+ {0x300, "MPUSS_L2_RAM",},
+ {0x100, "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+ .offset = 0x1000,
+ .l3_targ = am4372_l3_target_data_200f,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+ {0x100, "L4_PER_0",},
+ {0x200, "L4_PER_1",},
+ {0x300, "L4_PER_2",},
+ {0x400, "L4_PER_3",},
+ {0x800, "McASP0",},
+ {0x900, "McASP1",},
+ {0xC00, "MMCHS2",},
+ {0x700, "GPMC",},
+ {0xD00, "L4_FW",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x500, "ADCTSC",},
+ {0xE00, "L4_WKUP",},
+ {0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+ .offset = 0x600,
+ .l3_targ = am4372_l3_target_data_100s,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+ { 0x0, "M1 (128-bit)"},
+ { 0x1, "M2 (64-bit)"},
+ { 0x4, "DAP"},
+ { 0x5, "P1500"},
+ { 0xC, "ICSS0"},
+ { 0xD, "ICSS1"},
+ { 0x14, "Wakeup Processor"},
+ { 0x18, "TPTC0 Read"},
+ { 0x19, "TPTC0 Write"},
+ { 0x1A, "TPTC1 Read"},
+ { 0x1B, "TPTC1 Write"},
+ { 0x1C, "TPTC2 Read"},
+ { 0x1D, "TPTC2 Write"},
+ { 0x20, "SGX530"},
+ { 0x21, "OCP WP Traffic Probe"},
+ { 0x22, "OCP WP DMA Profiling"},
+ { 0x23, "OCP WP Event Trace"},
+ { 0x25, "DSS"},
+ { 0x28, "Crypto DMA RD"},
+ { 0x29, "Crypto DMA WR"},
+ { 0x2C, "VPFE0"},
+ { 0x2D, "VPFE1"},
+ { 0x30, "GEMAC"},
+ { 0x34, "USB0 RD"},
+ { 0x35, "USB0 WR"},
+ { 0x36, "USB1 RD"},
+ { 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+ &am4372_l3_flagmux_200f,
+ &am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+ .l3_flagmux = am4372_l3_flagmux,
+ .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+ .l3_masters = am4372_l3_masters,
+ .num_masters = ARRAY_SIZE(am4372_l3_masters),
+ /* All 6 bits of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0x3F,
+};
+
+#endif /* __OMAP_L3_NOC_H */
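
Putting the header's tables together: the interrupt handler treats each set
bit in a flag-mux status register as an index into that clock domain's
l3_targ[] array, and entries marked L3_TARGET_NOT_SUPPORTED are documented
sources the driver deliberately skips. A hypothetical lookup helper, assuming
exactly those semantics:

    static struct l3_target_data *
    l3_lookup_target(struct l3_flagmux_data *mux, unsigned int bit)
    {
    	struct l3_target_data *targ;

    	if (bit >= mux->num_targ_data)
    		return NULL;

    	targ = &mux->l3_targ[bit];
    	if (targ->name == L3_TARGET_NOT_SUPPORTED)
    		return NULL;	/* known source, intentionally unhandled */

    	return targ;
    }
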
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
new file mode 100644
index 000000000000..a64763b6b5fd
--- /dev/null
+++ b/drivers/bus/vexpress-config.c
@@ -0,0 +1,202 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/vexpress.h>
+
+
+struct vexpress_config_bridge {
+ struct vexpress_config_bridge_ops *ops;
+ void *context;
+};
+
+
+static DEFINE_MUTEX(vexpress_config_mutex);
+static struct class *vexpress_config_class;
+static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
+
+
+void vexpress_config_set_master(u32 site)
+{
+ vexpress_config_site_master = site;
+}
+
+u32 vexpress_config_get_master(void)
+{
+ return vexpress_config_site_master;
+}
+
+void vexpress_config_lock(void *arg)
+{
+ mutex_lock(&vexpress_config_mutex);
+}
+
+void vexpress_config_unlock(void *arg)
+{
+ mutex_unlock(&vexpress_config_mutex);
+}
+
+
+static void vexpress_config_find_prop(struct device_node *node,
+ const char *name, u32 *val)
+{
+ /* Default value */
+ *val = 0;
+
+ of_node_get(node);
+ while (node) {
+ if (of_property_read_u32(node, name, val) == 0) {
+ of_node_put(node);
+ return;
+ }
+ node = of_get_next_parent(node);
+ }
+}
+
+int vexpress_config_get_topo(struct device_node *node, u32 *site,
+ u32 *position, u32 *dcc)
+{
+ vexpress_config_find_prop(node, "arm,vexpress,site", site);
+ if (*site == VEXPRESS_SITE_MASTER)
+ *site = vexpress_config_site_master;
+ if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
+ return -EINVAL;
+ vexpress_config_find_prop(node, "arm,vexpress,position", position);
+ vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
+
+ return 0;
+}
+
+
+static void vexpress_config_devres_release(struct device *dev, void *res)
+{
+ struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = res;
+
+ bridge->ops->regmap_exit(regmap, bridge->context);
+}
+
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
+{
+ struct vexpress_config_bridge *bridge;
+ struct regmap *regmap;
+ struct regmap **res;
+
+ if (WARN_ON(dev->parent->class != vexpress_config_class))
+ return ERR_PTR(-ENODEV);
+
+ bridge = dev_get_drvdata(dev->parent);
+ if (WARN_ON(!bridge))
+ return ERR_PTR(-EINVAL);
+
+ res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
+ GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = bridge->ops->regmap_init(dev, bridge->context);
+ if (IS_ERR(regmap)) {
+ devres_free(res);
+ return regmap;
+ }
+
+ *res = regmap;
+ devres_add(dev, res);
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
+
+struct device *vexpress_config_bridge_register(struct device *parent,
+ struct vexpress_config_bridge_ops *ops, void *context)
+{
+ struct device *dev;
+ struct vexpress_config_bridge *bridge;
+
+ if (!vexpress_config_class) {
+ vexpress_config_class = class_create(THIS_MODULE,
+ "vexpress-config");
+ if (IS_ERR(vexpress_config_class))
+ return (void *)vexpress_config_class;
+ }
+
+ dev = device_create(vexpress_config_class, parent, 0,
+ NULL, "%s.bridge", dev_name(parent));
+
+ if (IS_ERR(dev))
+ return dev;
+
+ bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ put_device(dev);
+ device_unregister(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ bridge->ops = ops;
+ bridge->context = context;
+
+ dev_set_drvdata(dev, bridge);
+
+ dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
+ dev_name(dev), parent->of_node);
+
+ return dev;
+}
+
+
+static int vexpress_config_node_match(struct device *dev, const void *data)
+{
+ const struct device_node *node = data;
+
+ dev_dbg(dev, "Parent node %p, looking for %p\n",
+ dev->parent->of_node, node);
+
+ return dev->parent->of_node == node;
+}
+
+static int vexpress_config_populate(struct device_node *node)
+{
+ struct device_node *bridge;
+ struct device *parent;
+
+ bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (!bridge)
+ return -EINVAL;
+
+ parent = class_find_device(vexpress_config_class, NULL, bridge,
+ vexpress_config_node_match);
+ if (WARN_ON(!parent))
+ return -ENODEV;
+
+ return of_platform_populate(node, NULL, NULL, parent);
+}
+
+static int __init vexpress_config_init(void)
+{
+ int err = 0;
+ struct device_node *node;
+
+ /* Need the config devices early, before the "normal" devices... */
+ for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
+ err = vexpress_config_populate(node);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+postcore_initcall(vexpress_config_init);
+
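
For context, the provider side of this API (a hedged sketch; the my_* names
are hypothetical): a bridge driver fills in vexpress_config_bridge_ops with
regmap_init/regmap_exit callbacks and registers itself, after which config
devices beneath its DT node obtain a managed regmap via
devm_regmap_init_vexpress_config().

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/regmap.h>
    #include <linux/vexpress.h>

    static struct regmap *my_regmap_init(struct device *dev, void *context)
    {
    	/* build a regmap that reaches this config device's registers */
    	return ERR_PTR(-ENOSYS);		/* placeholder */
    }

    static void my_regmap_exit(struct regmap *regmap, void *context)
    {
    	/* undo whatever my_regmap_init() set up */
    }

    static struct vexpress_config_bridge_ops my_bridge_ops = {
    	.regmap_init = my_regmap_init,
    	.regmap_exit = my_regmap_exit,
    };

    static int my_bridge_probe(struct platform_device *pdev)
    {
    	struct device *bridge_dev;

    	bridge_dev = vexpress_config_bridge_register(&pdev->dev,
    						     &my_bridge_ops, NULL);
    	return PTR_ERR_OR_ZERO(bridge_dev);
    }
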
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8a3aff724d98..49ac5662585b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -312,36 +312,24 @@ static const char *mrw_format_status[] = {
static const char *mrw_address_space[] = { "DMA", "GAA" };
-#if (ERRLOGMASK!=CD_NOTHING)
-#define cdinfo(type, fmt, args...) \
+#if (ERRLOGMASK != CD_NOTHING)
+#define cd_dbg(type, fmt, ...) \
do { \
if ((ERRLOGMASK & type) || debug == 1) \
- pr_info(fmt, ##args); \
+ pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#else
-#define cdinfo(type, fmt, args...) \
+#define cd_dbg(type, fmt, ...) \
do { \
if (0 && (ERRLOGMASK & type) || debug == 1) \
- pr_info(fmt, ##args); \
+ pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#endif
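
Two idioms are worth noting in the rewritten macro: the do { } while (0)
wrapper makes cd_dbg() a single statement that stays safe under an un-braced
if/else, and ## before __VA_ARGS__ swallows the trailing comma when only a
format string is passed. Illustrative usage (inside any function with an int
open_count in scope):

    if (!open_count)
    	cd_dbg(CD_OPEN, "tray already open\n");	/* no args: ## drops the comma */
    else
    	cd_dbg(CD_OPEN, "use count now %d\n", open_count);
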
-/* These are used to simplify getting data in from and back to user land */
-#define IOCTL_IN(arg, type, in) \
- if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \
- return -EFAULT;
-
-#define IOCTL_OUT(arg, type, out) \
- if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \
- return -EFAULT;
-
/* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in
a lot of places. This macro makes the code more clear. */
#define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type))
-/* used in the audio ioctls */
-#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret
-
/*
* Another popular OS uses 7 seconds as the hard timeout for default
* commands, so it is a good choice for us as well.
@@ -349,21 +337,6 @@ do { \
#define CDROM_DEF_TIMEOUT (7 * HZ)
/* Not-exported routines. */
-static int open_for_data(struct cdrom_device_info * cdi);
-static int check_for_audio_disc(struct cdrom_device_info * cdi,
- struct cdrom_device_ops * cdo);
-static void sanitize_format(union cdrom_addr *addr,
- u_char * curr, u_char requested);
-static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
- unsigned long arg);
-
-int cdrom_get_last_written(struct cdrom_device_info *, long *);
-static int cdrom_get_next_writable(struct cdrom_device_info *, long *);
-static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*);
-
-static int cdrom_mrw_exit(struct cdrom_device_info *cdi);
-
-static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di);
static void cdrom_sysctl_register(void);
@@ -382,113 +355,65 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
return -EIO;
}
-/* This macro makes sure we don't have to check on cdrom_device_ops
- * existence in the run-time routines below. Change_capability is a
- * hack to have the capability flags defined const, while we can still
- * change it here without gcc complaining at every line.
- */
-#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits)
-
-int register_cdrom(struct cdrom_device_info *cdi)
-{
- static char banner_printed;
- struct cdrom_device_ops *cdo = cdi->ops;
- int *change_capability = (int *)&cdo->capability; /* hack */
-
- cdinfo(CD_OPEN, "entering register_cdrom\n");
-
- if (cdo->open == NULL || cdo->release == NULL)
- return -EINVAL;
- if (!banner_printed) {
- pr_info("Uniform CD-ROM driver " REVISION "\n");
- banner_printed = 1;
- cdrom_sysctl_register();
- }
-
- ENSURE(drive_status, CDC_DRIVE_STATUS );
- if (cdo->check_events == NULL && cdo->media_changed == NULL)
- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
- ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
- ENSURE(lock_door, CDC_LOCK);
- ENSURE(select_speed, CDC_SELECT_SPEED);
- ENSURE(get_last_session, CDC_MULTI_SESSION);
- ENSURE(get_mcn, CDC_MCN);
- ENSURE(reset, CDC_RESET);
- ENSURE(generic_packet, CDC_GENERIC_PACKET);
- cdi->mc_flags = 0;
- cdo->n_minors = 0;
- cdi->options = CDO_USE_FFLAGS;
-
- if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
- cdi->options |= (int) CDO_AUTO_CLOSE;
- if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY))
- cdi->options |= (int) CDO_AUTO_EJECT;
- if (lockdoor==1)
- cdi->options |= (int) CDO_LOCK;
- if (check_media_type==1)
- cdi->options |= (int) CDO_CHECK_TYPE;
-
- if (CDROM_CAN(CDC_MRW_W))
- cdi->exit = cdrom_mrw_exit;
-
- if (cdi->disk)
- cdi->cdda_method = CDDA_BPC_FULL;
- else
- cdi->cdda_method = CDDA_OLD;
-
- if (!cdo->generic_packet)
- cdo->generic_packet = cdrom_dummy_generic_packet;
-
- cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
- mutex_lock(&cdrom_mutex);
- list_add(&cdi->list, &cdrom_list);
- mutex_unlock(&cdrom_mutex);
- return 0;
-}
-#undef ENSURE
-
-void unregister_cdrom(struct cdrom_device_info *cdi)
+static int cdrom_flush_cache(struct cdrom_device_info *cdi)
{
- cdinfo(CD_OPEN, "entering unregister_cdrom\n");
+ struct packet_command cgc;
- mutex_lock(&cdrom_mutex);
- list_del(&cdi->list);
- mutex_unlock(&cdrom_mutex);
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
- if (cdi->exit)
- cdi->exit(cdi);
+ cgc.timeout = 5 * 60 * HZ;
- cdi->ops->n_minors--;
- cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+ return cdi->ops->generic_packet(cdi, &cgc);
}
-int cdrom_get_media_event(struct cdrom_device_info *cdi,
- struct media_event_desc *med)
+/* requires CD R/RW */
+static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
+ disc_information *di)
{
+ struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
- unsigned char buffer[8];
- struct event_header *eh = (struct event_header *) buffer;
+ int ret, buflen;
- init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
- cgc.cmd[1] = 1; /* IMMED */
- cgc.cmd[4] = 1 << 4; /* media event */
- cgc.cmd[8] = sizeof(buffer);
+ /* set up command and get the disc info */
+ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+ cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
- if (cdi->ops->generic_packet(cdi, &cgc))
- return 1;
+ ret = cdo->generic_packet(cdi, &cgc);
+ if (ret)
+ return ret;
- if (be16_to_cpu(eh->data_len) < sizeof(*med))
- return 1;
+ /* not all drives have the same disc_info length, so requeue
+ * packet with the length the drive tells us it can supply
+ */
+ buflen = be16_to_cpu(di->disc_information_length) +
+ sizeof(di->disc_information_length);
- if (eh->nea || eh->notification_class != 0x4)
- return 1;
+ if (buflen > sizeof(disc_information))
+ buflen = sizeof(disc_information);
- memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
- return 0;
+ cgc.cmd[8] = cgc.buflen = buflen;
+ ret = cdo->generic_packet(cdi, &cgc);
+ if (ret)
+ return ret;
+
+ /* return actual fill size */
+ return buflen;
}
+/* This macro makes sure we don't have to check on cdrom_device_ops
+ * existence in the run-time routines below. Change_capability is a
+ * hack to have the capability flags defined const, while we can still
+ * change it here without gcc complaining at every line.
+ */
+#define ENSURE(call, bits) \
+do { \
+ if (cdo->call == NULL) \
+ *change_capability &= ~(bits); \
+} while (0)
+
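
With the do { } while (0) form above, ENSURE() expands to a complete single
statement; for example, ENSURE(lock_door, CDC_LOCK) becomes:

    do {
    	if (cdo->lock_door == NULL)
    		*change_capability &= ~(CDC_LOCK);
    } while (0);
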
/*
* the first prototypes used 0x2c as the page code for the mrw mode page,
* subsequently this was changed to 0x03. probe the one used by this drive
@@ -605,18 +530,6 @@ static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed)
return cdi->ops->generic_packet(cdi, &cgc);
}
-static int cdrom_flush_cache(struct cdrom_device_info *cdi)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_FLUSH_CACHE;
-
- cgc.timeout = 5 * 60 * HZ;
-
- return cdi->ops->generic_packet(cdi, &cgc);
-}
-
static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
{
disc_information di;
@@ -650,17 +563,19 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
cgc.buffer = buffer;
cgc.buflen = sizeof(buffer);
- if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
+ ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0);
+ if (ret)
return ret;
- mph = (struct mode_page_header *) buffer;
+ mph = (struct mode_page_header *)buffer;
offset = be16_to_cpu(mph->desc_length);
size = be16_to_cpu(mph->mode_data_length) + 2;
buffer[offset + 3] = space;
cgc.buflen = size;
- if ((ret = cdrom_mode_select(cdi, &cgc)))
+ ret = cdrom_mode_select(cdi, &cgc);
+ if (ret)
return ret;
pr_info("%s: mrw address space %s selected\n",
@@ -668,6 +583,106 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
return 0;
}
+int register_cdrom(struct cdrom_device_info *cdi)
+{
+ static char banner_printed;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ int *change_capability = (int *)&cdo->capability; /* hack */
+
+ cd_dbg(CD_OPEN, "entering register_cdrom\n");
+
+ if (cdo->open == NULL || cdo->release == NULL)
+ return -EINVAL;
+ if (!banner_printed) {
+ pr_info("Uniform CD-ROM driver " REVISION "\n");
+ banner_printed = 1;
+ cdrom_sysctl_register();
+ }
+
+ ENSURE(drive_status, CDC_DRIVE_STATUS);
+ if (cdo->check_events == NULL && cdo->media_changed == NULL)
+ *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
+ ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
+ ENSURE(lock_door, CDC_LOCK);
+ ENSURE(select_speed, CDC_SELECT_SPEED);
+ ENSURE(get_last_session, CDC_MULTI_SESSION);
+ ENSURE(get_mcn, CDC_MCN);
+ ENSURE(reset, CDC_RESET);
+ ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ cdi->mc_flags = 0;
+ cdo->n_minors = 0;
+ cdi->options = CDO_USE_FFLAGS;
+
+ if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
+ cdi->options |= (int) CDO_AUTO_CLOSE;
+ if (autoeject == 1 && CDROM_CAN(CDC_OPEN_TRAY))
+ cdi->options |= (int) CDO_AUTO_EJECT;
+ if (lockdoor == 1)
+ cdi->options |= (int) CDO_LOCK;
+ if (check_media_type == 1)
+ cdi->options |= (int) CDO_CHECK_TYPE;
+
+ if (CDROM_CAN(CDC_MRW_W))
+ cdi->exit = cdrom_mrw_exit;
+
+ if (cdi->disk)
+ cdi->cdda_method = CDDA_BPC_FULL;
+ else
+ cdi->cdda_method = CDDA_OLD;
+
+ if (!cdo->generic_packet)
+ cdo->generic_packet = cdrom_dummy_generic_packet;
+
+ cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+ mutex_lock(&cdrom_mutex);
+ list_add(&cdi->list, &cdrom_list);
+ mutex_unlock(&cdrom_mutex);
+ return 0;
+}
+#undef ENSURE
+
+void unregister_cdrom(struct cdrom_device_info *cdi)
+{
+ cd_dbg(CD_OPEN, "entering unregister_cdrom\n");
+
+ mutex_lock(&cdrom_mutex);
+ list_del(&cdi->list);
+ mutex_unlock(&cdrom_mutex);
+
+ if (cdi->exit)
+ cdi->exit(cdi);
+
+ cdi->ops->n_minors--;
+ cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+}
+
+int cdrom_get_media_event(struct cdrom_device_info *cdi,
+ struct media_event_desc *med)
+{
+ struct packet_command cgc;
+ unsigned char buffer[8];
+ struct event_header *eh = (struct event_header *)buffer;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
+ cgc.cmd[1] = 1; /* IMMED */
+ cgc.cmd[4] = 1 << 4; /* media event */
+ cgc.cmd[8] = sizeof(buffer);
+ cgc.quiet = 1;
+
+ if (cdi->ops->generic_packet(cdi, &cgc))
+ return 1;
+
+ if (be16_to_cpu(eh->data_len) < sizeof(*med))
+ return 1;
+
+ if (eh->nea || eh->notification_class != 0x4)
+ return 1;
+
+ memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
+ return 0;
+}
+
static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
struct rwrt_feature_desc *rfd)
{
@@ -839,7 +854,7 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
else if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
ret = !rfd.curr;
- cdinfo(CD_OPEN, "can open for random write\n");
+ cd_dbg(CD_OPEN, "can open for random write\n");
return ret;
}
@@ -928,12 +943,12 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
struct packet_command cgc;
if (cdi->mmc3_profile != 0x1a) {
- cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
+ cd_dbg(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
return;
}
if (!cdi->media_written) {
- cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
+ cd_dbg(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
return;
}
@@ -969,82 +984,74 @@ static int cdrom_close_write(struct cdrom_device_info *cdi)
#endif
}
-/* We use the open-option O_NONBLOCK to indicate that the
- * purpose of opening is only for subsequent ioctl() calls; no device
- * integrity checks are performed.
- *
- * We hope that all cd-player programs will adopt this convention. It
- * is in their own interest: device control becomes a lot easier
- * this way.
- */
-int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
+/* badly broken, I know. Is due for a fixup anytime. */
+static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
{
- int ret;
-
- cdinfo(CD_OPEN, "entering cdrom_open\n");
-
- /* open is event synchronization point, check events first */
- check_disk_change(bdev);
-
- /* if this was a O_NONBLOCK open and we should honor the flags,
- * do a quick open without drive/disc integrity checks. */
- cdi->use_count++;
- if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
- ret = cdi->ops->open(cdi, 1);
- } else {
- ret = open_for_data(cdi);
- if (ret)
- goto err;
- cdrom_mmc3_profile(cdi);
- if (mode & FMODE_WRITE) {
- ret = -EROFS;
- if (cdrom_open_write(cdi))
- goto err_release;
- if (!CDROM_CAN(CDC_RAM))
- goto err_release;
- ret = 0;
- cdi->media_written = 0;
- }
+ struct cdrom_tochdr header;
+ struct cdrom_tocentry entry;
+ int ret, i;
+ tracks->data = 0;
+ tracks->audio = 0;
+ tracks->cdi = 0;
+ tracks->xa = 0;
+ tracks->error = 0;
+ cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+ /* Grab the TOC header so we can see how many tracks there are */
+ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
+ if (ret) {
+ if (ret == -ENOMEDIUM)
+ tracks->error = CDS_NO_DISC;
+ else
+ tracks->error = CDS_NO_INFO;
+ return;
}
-
- if (ret)
- goto err;
-
- cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
- cdi->name, cdi->use_count);
- return 0;
-err_release:
- if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
- cdi->ops->lock_door(cdi, 0);
- cdinfo(CD_OPEN, "door unlocked.\n");
+ /* check what type of tracks are on this disc */
+ entry.cdte_format = CDROM_MSF;
+ for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
+ entry.cdte_track = i;
+ if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+ if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
+ if (entry.cdte_format == 0x10)
+ tracks->cdi++;
+ else if (entry.cdte_format == 0x20)
+ tracks->xa++;
+ else
+ tracks->data++;
+ } else {
+ tracks->audio++;
+ }
+ cd_dbg(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
+ i, entry.cdte_format, entry.cdte_ctrl);
}
- cdi->ops->release(cdi);
-err:
- cdi->use_count--;
- return ret;
+ cd_dbg(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
+ header.cdth_trk1, tracks->audio, tracks->data,
+ tracks->cdi, tracks->xa);
}
static
-int open_for_data(struct cdrom_device_info * cdi)
+int open_for_data(struct cdrom_device_info *cdi)
{
int ret;
struct cdrom_device_ops *cdo = cdi->ops;
tracktype tracks;
- cdinfo(CD_OPEN, "entering open_for_data\n");
+ cd_dbg(CD_OPEN, "entering open_for_data\n");
/* Check if the driver can report drive status. If it can, we
can do clever things. If it can't, well, we at least tried! */
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
- cdinfo(CD_OPEN, "drive_status=%d\n", ret);
+ cd_dbg(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
- cdinfo(CD_OPEN, "the tray is open...\n");
+ cd_dbg(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
- cdinfo(CD_OPEN, "trying to close the tray.\n");
+ cd_dbg(CD_OPEN, "trying to close the tray\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
- cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n");
+ cd_dbg(CD_OPEN, "bummer. tried to close the tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
@@ -1054,19 +1061,19 @@ int open_for_data(struct cdrom_device_info * cdi)
goto clean_up_and_return;
}
} else {
- cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n");
+ cd_dbg(CD_OPEN, "bummer. this drive can't close the tray.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
/* Ok, the door should be closed now.. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
- cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
- cdinfo(CD_OPEN, "tray might not contain a medium.\n");
+ cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n");
+ cd_dbg(CD_OPEN, "tray might not contain a medium\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
- cdinfo(CD_OPEN, "the tray is now closed.\n");
+ cd_dbg(CD_OPEN, "the tray is now closed\n");
}
/* the door should be closed now, check for the disc */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
@@ -1077,7 +1084,7 @@ int open_for_data(struct cdrom_device_info * cdi)
}
cdrom_count_tracks(cdi, &tracks);
if (tracks.error == CDS_NO_DISC) {
- cdinfo(CD_OPEN, "bummer. no disc.\n");
+ cd_dbg(CD_OPEN, "bummer. no disc.\n");
ret=-ENOMEDIUM;
goto clean_up_and_return;
}
@@ -1087,34 +1094,34 @@ int open_for_data(struct cdrom_device_info * cdi)
if (cdi->options & CDO_CHECK_TYPE) {
/* give people a warning shot, now that CDO_CHECK_TYPE
is the default case! */
- cdinfo(CD_OPEN, "bummer. wrong media type.\n");
- cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n",
- (unsigned int)task_pid_nr(current));
+ cd_dbg(CD_OPEN, "bummer. wrong media type.\n");
+ cd_dbg(CD_WARNING, "pid %d must open device O_NONBLOCK!\n",
+ (unsigned int)task_pid_nr(current));
ret=-EMEDIUMTYPE;
goto clean_up_and_return;
}
else {
- cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n");
+ cd_dbg(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set\n");
}
}
- cdinfo(CD_OPEN, "all seems well, opening the device.\n");
+ cd_dbg(CD_OPEN, "all seems well, opening the devicen");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
- cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret);
+ cd_dbg(CD_OPEN, "opening the device gave me %d\n", ret);
/* After all this careful checking, we shouldn't have problems
opening the device, but we don't want the device locked if
this somehow fails... */
if (ret) {
- cdinfo(CD_OPEN, "open device failed.\n");
+ cd_dbg(CD_OPEN, "open device failed\n");
goto clean_up_and_return;
}
if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) {
cdo->lock_door(cdi, 1);
- cdinfo(CD_OPEN, "door locked.\n");
+ cd_dbg(CD_OPEN, "door locked\n");
}
- cdinfo(CD_OPEN, "device opened successfully.\n");
+ cd_dbg(CD_OPEN, "device opened successfully\n");
return ret;
/* Something failed. Try to unlock the drive, because some drivers
@@ -1123,14 +1130,70 @@ int open_for_data(struct cdrom_device_info * cdi)
This ensures that the drive gets unlocked after a mount fails. This
is a goto to avoid bloating the driver with redundant code. */
clean_up_and_return:
- cdinfo(CD_OPEN, "open failed.\n");
+ cd_dbg(CD_OPEN, "open failed\n");
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdo->lock_door(cdi, 0);
- cdinfo(CD_OPEN, "door unlocked.\n");
+ cd_dbg(CD_OPEN, "door unlocked\n");
}
return ret;
}
+/* We use the open-option O_NONBLOCK to indicate that the
+ * purpose of opening is only for subsequent ioctl() calls; no device
+ * integrity checks are performed.
+ *
+ * We hope that all cd-player programs will adopt this convention. It
+ * is in their own interest: device control becomes a lot easier
+ * this way.
+ */
+int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode)
+{
+ int ret;
+
+ cd_dbg(CD_OPEN, "entering cdrom_open\n");
+
+ /* open is event synchronization point, check events first */
+ check_disk_change(bdev);
+
+ /* if this was a O_NONBLOCK open and we should honor the flags,
+ * do a quick open without drive/disc integrity checks. */
+ cdi->use_count++;
+ if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
+ ret = cdi->ops->open(cdi, 1);
+ } else {
+ ret = open_for_data(cdi);
+ if (ret)
+ goto err;
+ cdrom_mmc3_profile(cdi);
+ if (mode & FMODE_WRITE) {
+ ret = -EROFS;
+ if (cdrom_open_write(cdi))
+ goto err_release;
+ if (!CDROM_CAN(CDC_RAM))
+ goto err_release;
+ ret = 0;
+ cdi->media_written = 0;
+ }
+ }
+
+ if (ret)
+ goto err;
+
+ cd_dbg(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
+ cdi->name, cdi->use_count);
+ return 0;
+err_release:
+ if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+ cdi->ops->lock_door(cdi, 0);
+ cd_dbg(CD_OPEN, "door unlocked\n");
+ }
+ cdi->ops->release(cdi);
+err:
+ cdi->use_count--;
+ return ret;
+}
+
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
*/
@@ -1139,21 +1202,21 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
{
int ret;
tracktype tracks;
- cdinfo(CD_OPEN, "entering check_for_audio_disc\n");
+ cd_dbg(CD_OPEN, "entering check_for_audio_disc\n");
if (!(cdi->options & CDO_CHECK_TYPE))
return 0;
if (cdo->drive_status != NULL) {
ret = cdo->drive_status(cdi, CDSL_CURRENT);
- cdinfo(CD_OPEN, "drive_status=%d\n", ret);
+ cd_dbg(CD_OPEN, "drive_status=%d\n", ret);
if (ret == CDS_TRAY_OPEN) {
- cdinfo(CD_OPEN, "the tray is open...\n");
+ cd_dbg(CD_OPEN, "the tray is open...\n");
/* can/may i close it? */
if (CDROM_CAN(CDC_CLOSE_TRAY) &&
cdi->options & CDO_AUTO_CLOSE) {
- cdinfo(CD_OPEN, "trying to close the tray.\n");
+ cd_dbg(CD_OPEN, "trying to close the tray\n");
ret=cdo->tray_move(cdi,0);
if (ret) {
- cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n");
+ cd_dbg(CD_OPEN, "bummer. tried to close tray but failed.\n");
/* Ignore the error from the low
level driver. We don't care why it
couldn't close the tray. We only care
@@ -1162,20 +1225,20 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
return -ENOMEDIUM;
}
} else {
- cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n");
+ cd_dbg(CD_OPEN, "bummer. this driver can't close the tray.\n");
return -ENOMEDIUM;
}
/* Ok, the door should be closed now.. Check again */
ret = cdo->drive_status(cdi, CDSL_CURRENT);
if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
- cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
+ cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n");
return -ENOMEDIUM;
}
if (ret!=CDS_DISC_OK) {
- cdinfo(CD_OPEN, "bummer. disc isn't ready.\n");
+ cd_dbg(CD_OPEN, "bummer. disc isn't ready.\n");
return -EIO;
}
- cdinfo(CD_OPEN, "the tray is now closed.\n");
+ cd_dbg(CD_OPEN, "the tray is now closed\n");
}
}
cdrom_count_tracks(cdi, &tracks);
@@ -1193,17 +1256,18 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
- cdinfo(CD_CLOSE, "entering cdrom_release\n");
+ cd_dbg(CD_CLOSE, "entering cdrom_release\n");
if (cdi->use_count > 0)
cdi->use_count--;
if (cdi->use_count == 0) {
- cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
+ cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n",
+ cdi->name);
cdrom_dvd_rw_close_write(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
- cdinfo(CD_CLOSE, "Unlocking door!\n");
+ cd_dbg(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
}
@@ -1262,7 +1326,7 @@ static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot)
struct cdrom_changer_info *info;
int ret;
- cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n");
+ cd_dbg(CD_CHANGER, "entering cdrom_slot_status()\n");
if (cdi->sanyo_slot)
return CDS_NO_INFO;
@@ -1292,7 +1356,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi)
int nslots = 1;
struct cdrom_changer_info *info;
- cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n");
+ cd_dbg(CD_CHANGER, "entering cdrom_number_of_slots()\n");
/* cdrom_read_mech_status requires a valid value for capacity: */
cdi->capacity = 0;
@@ -1313,7 +1377,7 @@ static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot)
{
struct packet_command cgc;
- cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n");
+ cd_dbg(CD_CHANGER, "entering cdrom_load_unload()\n");
if (cdi->sanyo_slot && slot < 0)
return 0;
@@ -1342,7 +1406,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
int curslot;
int ret;
- cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n");
+ cd_dbg(CD_CHANGER, "entering cdrom_select_disc()\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -EDRIVE_CANT_DO_THIS;
@@ -1476,51 +1540,6 @@ int cdrom_media_changed(struct cdrom_device_info *cdi)
return media_changed(cdi, 0);
}
-/* badly broken, I know. Is due for a fixup anytime. */
-static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
-{
- struct cdrom_tochdr header;
- struct cdrom_tocentry entry;
- int ret, i;
- tracks->data=0;
- tracks->audio=0;
- tracks->cdi=0;
- tracks->xa=0;
- tracks->error=0;
- cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
- /* Grab the TOC header so we can see how many tracks there are */
- if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
- if (ret == -ENOMEDIUM)
- tracks->error = CDS_NO_DISC;
- else
- tracks->error = CDS_NO_INFO;
- return;
- }
- /* check what type of tracks are on this disc */
- entry.cdte_format = CDROM_MSF;
- for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
- entry.cdte_track = i;
- if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
- tracks->error=CDS_NO_INFO;
- return;
- }
- if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
- if (entry.cdte_format == 0x10)
- tracks->cdi++;
- else if (entry.cdte_format == 0x20)
- tracks->xa++;
- else
- tracks->data++;
- } else
- tracks->audio++;
- cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
- i, entry.cdte_format, entry.cdte_ctrl);
- }
- cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
- header.cdth_trk1, tracks->audio, tracks->data,
- tracks->cdi, tracks->xa);
-}
-
/* Requests to the low-level drivers will /always/ be done in the
following format convention:
@@ -1632,7 +1651,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
switch (ai->type) {
/* LU data send */
case DVD_LU_SEND_AGID:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_AGID\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lsa.agid, 0);
@@ -1644,7 +1663,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
break;
case DVD_LU_SEND_KEY1:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_KEY1\n");
setup_report_key(&cgc, ai->lsk.agid, 2);
if ((ret = cdo->generic_packet(cdi, &cgc)))
@@ -1655,7 +1674,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
break;
case DVD_LU_SEND_CHALLENGE:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n");
setup_report_key(&cgc, ai->lsc.agid, 1);
if ((ret = cdo->generic_packet(cdi, &cgc)))
@@ -1667,7 +1686,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
/* Post-auth key */
case DVD_LU_SEND_TITLE_KEY:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n");
cgc.quiet = 1;
setup_report_key(&cgc, ai->lstk.agid, 4);
cgc.cmd[5] = ai->lstk.lba;
@@ -1686,7 +1705,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
break;
case DVD_LU_SEND_ASF:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_ASF\n");
setup_report_key(&cgc, ai->lsasf.agid, 5);
if ((ret = cdo->generic_packet(cdi, &cgc)))
@@ -1697,7 +1716,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
/* LU data receive (LU changes state) */
case DVD_HOST_SEND_CHALLENGE:
- cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n");
+ cd_dbg(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n");
setup_send_key(&cgc, ai->hsc.agid, 1);
buf[1] = 0xe;
copy_chal(&buf[4], ai->hsc.chal);
@@ -1709,7 +1728,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
break;
case DVD_HOST_SEND_KEY2:
- cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n");
+ cd_dbg(CD_DVD, "entering DVD_HOST_SEND_KEY2\n");
setup_send_key(&cgc, ai->hsk.agid, 3);
buf[1] = 0xa;
copy_key(&buf[4], ai->hsk.key);
@@ -1724,7 +1743,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
/* Misc */
case DVD_INVALIDATE_AGID:
cgc.quiet = 1;
- cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
+ cd_dbg(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
setup_report_key(&cgc, ai->lsa.agid, 0x3f);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
@@ -1732,7 +1751,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
/* Get region settings */
case DVD_LU_SEND_RPC_STATE:
- cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
+ cd_dbg(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
setup_report_key(&cgc, 0, 8);
memset(&rpc_state, 0, sizeof(rpc_state_t));
cgc.buffer = (char *) &rpc_state;
@@ -1749,7 +1768,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
/* Set region settings */
case DVD_HOST_SEND_RPC_STATE:
- cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n");
+ cd_dbg(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n");
setup_send_key(&cgc, 0, 6);
buf[1] = 6;
buf[4] = ai->hrpcs.pdrc;
@@ -1759,7 +1778,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
break;
default:
- cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
+ cd_dbg(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
return -ENOTTY;
}
@@ -1891,7 +1910,8 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
s->bca.len = buf[0] << 8 | buf[1];
if (s->bca.len < 12 || s->bca.len > 188) {
- cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
+ cd_dbg(CD_WARNING, "Received invalid BCA length (%d)\n",
+ s->bca.len);
ret = -EIO;
goto out;
}
@@ -1927,14 +1947,13 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
s->manufact.len = buf[0] << 8 | buf[1];
if (s->manufact.len < 0) {
- cdinfo(CD_WARNING, "Received invalid manufacture info length"
- " (%d)\n", s->manufact.len);
+ cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d)\n",
+ s->manufact.len);
ret = -EIO;
} else {
if (s->manufact.len > 2048) {
- cdinfo(CD_WARNING, "Received invalid manufacture info "
- "length (%d): truncating to 2048\n",
- s->manufact.len);
+ cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d): truncating to 2048\n",
+ s->manufact.len);
s->manufact.len = 2048;
}
memcpy(s->manufact.value, &buf[4], s->manufact.len);
@@ -1965,8 +1984,8 @@ static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
return dvd_read_manufact(cdi, s, cgc);
default:
- cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
- s->type);
+ cd_dbg(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
+ s->type);
return -EINVAL;
}
}
@@ -2255,7 +2274,7 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
u8 requested_format;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
if (!(cdi->ops->capability & CDC_MULTI_SESSION))
return -ENOSYS;
@@ -2277,13 +2296,13 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
return -EFAULT;
- cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
+ cd_dbg(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
return 0;
}
static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
{
- cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
@@ -2300,7 +2319,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
{
- cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
if (!CDROM_CAN(CDC_CLOSE_TRAY))
return -ENOSYS;
@@ -2310,7 +2329,7 @@ static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
@@ -2329,7 +2348,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
struct cdrom_changer_info *info;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return -ENOSYS;
@@ -2355,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
/*
* Options need to be in sync with capability.
@@ -2383,7 +2402,7 @@ static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
cdi->options &= ~(int) arg;
return cdi->options;
@@ -2392,7 +2411,7 @@ static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
if (!CDROM_CAN(CDC_SELECT_SPEED))
return -ENOSYS;
@@ -2402,7 +2421,7 @@ static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
if (!CDROM_CAN(CDC_SELECT_DISC))
return -ENOSYS;
@@ -2420,14 +2439,14 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
if (cdi->ops->select_disc)
return cdi->ops->select_disc(cdi, arg);
- cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
+ cd_dbg(CD_CHANGER, "Using generic cdrom_select_disc()\n");
return cdrom_select_disc(cdi, arg);
}
static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
struct block_device *bdev)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_RESET\n");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -2440,7 +2459,7 @@ static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
+ cd_dbg(CD_DO_IOCTL, "%socking door\n", arg ? "L" : "Unl");
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
@@ -2459,7 +2478,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
+ cd_dbg(CD_DO_IOCTL, "%sabling debug\n", arg ? "En" : "Dis");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -2469,7 +2488,7 @@ static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
return (cdi->ops->capability & ~cdi->mask);
}
@@ -2485,7 +2504,7 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
struct cdrom_mcn mcn;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
if (!(cdi->ops->capability & CDC_MCN))
return -ENOSYS;
@@ -2495,14 +2514,14 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
if (copy_to_user(argp, &mcn, sizeof(mcn)))
return -EFAULT;
- cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
+ cd_dbg(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
return 0;
}
static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
unsigned long arg)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
return -ENOSYS;
@@ -2535,7 +2554,7 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
{
tracktype tracks;
- cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
cdrom_count_tracks(cdi, &tracks);
if (tracks.error)
@@ -2557,13 +2576,13 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
return CDS_DATA_1;
/* Policy mode off */
- cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
+ cd_dbg(CD_WARNING, "This disc doesn't have any tracks I recognize!\n");
return CDS_NO_INFO;
}
static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
{
- cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
return cdi->capacity;
}
@@ -2574,7 +2593,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
u8 requested, back;
int ret;
- /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
+ /* cd_dbg(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
if (copy_from_user(&q, argp, sizeof(q)))
return -EFAULT;
@@ -2594,7 +2613,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
if (copy_to_user(argp, &q, sizeof(q)))
return -EFAULT;
- /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+ /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
@@ -2604,7 +2623,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
struct cdrom_tochdr header;
int ret;
- /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
+ /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
if (copy_from_user(&header, argp, sizeof(header)))
return -EFAULT;
@@ -2615,7 +2634,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
if (copy_to_user(argp, &header, sizeof(header)))
return -EFAULT;
- /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
+ /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
return 0;
}
@@ -2626,7 +2645,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
u8 requested_format;
int ret;
- /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
+ /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
@@ -2643,7 +2662,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
if (copy_to_user(argp, &entry, sizeof(entry)))
return -EFAULT;
- /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
+ /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
return 0;
}
@@ -2652,7 +2671,7 @@ static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
{
struct cdrom_msf msf;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
@@ -2667,7 +2686,7 @@ static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
struct cdrom_ti ti;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
@@ -2684,7 +2703,7 @@ static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
{
struct cdrom_volctrl volume;
- cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
@@ -2699,7 +2718,7 @@ static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
struct cdrom_volctrl volume;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
@@ -2718,7 +2737,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
{
int ret;
- cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
+ cd_dbg(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
@@ -2729,103 +2748,6 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
}
/*
- * Just about every imaginable ioctl is supported in the Uniform layer
- * these days.
- * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
- */
-int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int ret;
-
- /*
- * Try the generic SCSI command ioctl's first.
- */
- ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
- if (ret != -ENOTTY)
- return ret;
-
- switch (cmd) {
- case CDROMMULTISESSION:
- return cdrom_ioctl_multisession(cdi, argp);
- case CDROMEJECT:
- return cdrom_ioctl_eject(cdi);
- case CDROMCLOSETRAY:
- return cdrom_ioctl_closetray(cdi);
- case CDROMEJECT_SW:
- return cdrom_ioctl_eject_sw(cdi, arg);
- case CDROM_MEDIA_CHANGED:
- return cdrom_ioctl_media_changed(cdi, arg);
- case CDROM_SET_OPTIONS:
- return cdrom_ioctl_set_options(cdi, arg);
- case CDROM_CLEAR_OPTIONS:
- return cdrom_ioctl_clear_options(cdi, arg);
- case CDROM_SELECT_SPEED:
- return cdrom_ioctl_select_speed(cdi, arg);
- case CDROM_SELECT_DISC:
- return cdrom_ioctl_select_disc(cdi, arg);
- case CDROMRESET:
- return cdrom_ioctl_reset(cdi, bdev);
- case CDROM_LOCKDOOR:
- return cdrom_ioctl_lock_door(cdi, arg);
- case CDROM_DEBUG:
- return cdrom_ioctl_debug(cdi, arg);
- case CDROM_GET_CAPABILITY:
- return cdrom_ioctl_get_capability(cdi);
- case CDROM_GET_MCN:
- return cdrom_ioctl_get_mcn(cdi, argp);
- case CDROM_DRIVE_STATUS:
- return cdrom_ioctl_drive_status(cdi, arg);
- case CDROM_DISC_STATUS:
- return cdrom_ioctl_disc_status(cdi);
- case CDROM_CHANGER_NSLOTS:
- return cdrom_ioctl_changer_nslots(cdi);
- }
-
- /*
- * Use the ioctls that are implemented through the generic_packet()
- * interface. this may look at bit funny, but if -ENOTTY is
- * returned that particular ioctl is not implemented and we
- * let it go through the device specific ones.
- */
- if (CDROM_CAN(CDC_GENERIC_PACKET)) {
- ret = mmc_ioctl(cdi, cmd, arg);
- if (ret != -ENOTTY)
- return ret;
- }
-
- /*
- * Note: most of the cdinfo() calls are commented out here,
- * because they fill up the sys log when CD players poll
- * the drive.
- */
- switch (cmd) {
- case CDROMSUBCHNL:
- return cdrom_ioctl_get_subchnl(cdi, argp);
- case CDROMREADTOCHDR:
- return cdrom_ioctl_read_tochdr(cdi, argp);
- case CDROMREADTOCENTRY:
- return cdrom_ioctl_read_tocentry(cdi, argp);
- case CDROMPLAYMSF:
- return cdrom_ioctl_play_msf(cdi, argp);
- case CDROMPLAYTRKIND:
- return cdrom_ioctl_play_trkind(cdi, argp);
- case CDROMVOLCTRL:
- return cdrom_ioctl_volctrl(cdi, argp);
- case CDROMVOLREAD:
- return cdrom_ioctl_volread(cdi, argp);
- case CDROMSTART:
- case CDROMSTOP:
- case CDROMPAUSE:
- case CDROMRESUME:
- return cdrom_ioctl_audioctl(cdi, cmd);
- }
-
- return -ENOSYS;
-}
-
-/*
* Required when we need to use READ_10 to issue other than 2048 block
* reads
*/
@@ -2854,10 +2776,158 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
return cdo->generic_packet(cdi, &cgc);
}
+static int cdrom_get_track_info(struct cdrom_device_info *cdi,
+ __u16 track, __u8 type, track_information *ti)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct packet_command cgc;
+ int ret, buflen;
+
+ init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+ cgc.cmd[1] = type & 3;
+ cgc.cmd[4] = (track & 0xff00) >> 8;
+ cgc.cmd[5] = track & 0xff;
+ cgc.cmd[8] = 8;
+ cgc.quiet = 1;
+
+ ret = cdo->generic_packet(cdi, &cgc);
+ if (ret)
+ return ret;
+
+ buflen = be16_to_cpu(ti->track_information_length) +
+ sizeof(ti->track_information_length);
+
+ if (buflen > sizeof(track_information))
+ buflen = sizeof(track_information);
+
+ cgc.cmd[8] = cgc.buflen = buflen;
+ ret = cdo->generic_packet(cdi, &cgc);
+ if (ret)
+ return ret;
+
+ /* return actual fill size */
+ return buflen;
+}
+
+/* return the last written block on the CD-R media. this is for the udf
+ file system. */
+int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
+{
+ struct cdrom_tocentry toc;
+ disc_information di;
+ track_information ti;
+ __u32 last_track;
+ int ret = -1, ti_size;
+
+ if (!CDROM_CAN(CDC_GENERIC_PACKET))
+ goto use_toc;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < (int)(offsetof(typeof(di), last_track_lsb)
+ + sizeof(di.last_track_lsb)))
+ goto use_toc;
+
+ /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < (int)offsetof(typeof(ti), track_start))
+ goto use_toc;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+ if (last_track == 1)
+ goto use_toc;
+ last_track--;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ }
+
+ if (ti_size < (int)(offsetof(typeof(ti), track_size)
+ + sizeof(ti.track_size)))
+ goto use_toc;
+
+ /* if last recorded field is valid, return it. */
+ if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
+ + sizeof(ti.last_rec_address))) {
+ *last_written = be32_to_cpu(ti.last_rec_address);
+ } else {
+ /* make it up instead */
+ *last_written = be32_to_cpu(ti.track_start) +
+ be32_to_cpu(ti.track_size);
+ if (ti.free_blocks)
+ *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+ }
+ return 0;
+
+ /* this is where we end up if the drive either can't do a
+ GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if
+ it doesn't give enough information or fails. then we return
+ the toc contents. */
+use_toc:
+ toc.cdte_format = CDROM_MSF;
+ toc.cdte_track = CDROM_LEADOUT;
+ if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
+ return ret;
+ sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA);
+ *last_written = toc.cdte_addr.lba;
+ return 0;
+}
+
+/* return the next writable block. also for udf file system. */
+static int cdrom_get_next_writable(struct cdrom_device_info *cdi,
+ long *next_writable)
+{
+ disc_information di;
+ track_information ti;
+ __u16 last_track;
+ int ret, ti_size;
+
+ if (!CDROM_CAN(CDC_GENERIC_PACKET))
+ goto use_last_written;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb)
+ + sizeof(di.last_track_lsb))
+ goto use_last_written;
+
+ /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
+ goto use_last_written;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+ if (last_track == 1)
+ goto use_last_written;
+ last_track--;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < 0)
+ goto use_last_written;
+ }
+
+ /* if next recordable address field is valid, use it. */
+ if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
+ + sizeof(ti.next_writable)) {
+ *next_writable = be32_to_cpu(ti.next_writable);
+ return 0;
+ }
+
+use_last_written:
+ ret = cdrom_get_last_written(cdi, next_writable);
+ if (ret) {
+ *next_writable = 0;
+ return ret;
+ } else {
+ *next_writable += 7;
+ return 0;
+ }
+}
+
static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
- void __user *arg,
- struct packet_command *cgc,
- int cmd)
+ void __user *arg,
+ struct packet_command *cgc,
+ int cmd)
{
struct request_sense sense;
struct cdrom_msf msf;
@@ -2876,7 +2946,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
blocksize = CD_FRAMESIZE_RAW0;
break;
}
- IOCTL_IN(arg, struct cdrom_msf, msf);
+ if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf)))
+ return -EFAULT;
lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
/* FIXME: we need upper bound checking, too!! */
if (lba < 0)
@@ -2891,8 +2962,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
if (ret && sense.sense_key == 0x05 &&
- sense.asc == 0x20 &&
- sense.ascq == 0x00) {
+ sense.asc == 0x20 &&
+ sense.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
@@ -2913,12 +2984,14 @@ out:
}
static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
- void __user *arg)
+ void __user *arg)
{
struct cdrom_read_audio ra;
int lba;
- IOCTL_IN(arg, struct cdrom_read_audio, ra);
+ if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
+ sizeof(ra)))
+ return -EFAULT;
if (ra.addr_format == CDROM_MSF)
lba = msf_to_lba(ra.addr.msf.minute,
@@ -2937,12 +3010,13 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
- void __user *arg)
+ void __user *arg)
{
int ret;
struct cdrom_subchnl q;
u_char requested, back;
- IOCTL_IN(arg, struct cdrom_subchnl, q);
+ if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q)))
+ return -EFAULT;
requested = q.cdsc_format;
if (!((requested == CDROM_MSF) ||
(requested == CDROM_LBA)))
@@ -2954,19 +3028,21 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
back = q.cdsc_format; /* local copy */
sanitize_format(&q.cdsc_absaddr, &back, requested);
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
- IOCTL_OUT(arg, struct cdrom_subchnl, q);
- /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+ if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q)))
+ return -EFAULT;
+ /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
return 0;
}
static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
- void __user *arg,
- struct packet_command *cgc)
+ void __user *arg,
+ struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_msf msf;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
- IOCTL_IN(arg, struct cdrom_msf, msf);
+ cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+ if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf)))
+ return -EFAULT;
cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
cgc->cmd[3] = msf.cdmsf_min0;
cgc->cmd[4] = msf.cdmsf_sec0;
@@ -2979,13 +3055,14 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
- void __user *arg,
- struct packet_command *cgc)
+ void __user *arg,
+ struct packet_command *cgc)
{
struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_blk blk;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
- IOCTL_IN(arg, struct cdrom_blk, blk);
+ cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
+ if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk)))
+ return -EFAULT;
cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
cgc->cmd[2] = (blk.from >> 24) & 0xff;
cgc->cmd[3] = (blk.from >> 16) & 0xff;
@@ -2998,9 +3075,9 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
- void __user *arg,
- struct packet_command *cgc,
- unsigned int cmd)
+ void __user *arg,
+ struct packet_command *cgc,
+ unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
@@ -3008,9 +3085,11 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
unsigned short offset;
int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n");
- IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
+ if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg,
+ sizeof(volctrl)))
+ return -EFAULT;
cgc->buffer = buffer;
cgc->buflen = 24;
@@ -3030,14 +3109,14 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
- GPMODE_AUDIO_CTL_PAGE, 0);
+ GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
- buffer[offset + 1] < 14)
+ buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
@@ -3047,7 +3126,9 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
- IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
+ if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl,
+ sizeof(volctrl)))
+ return -EFAULT;
return 0;
}
@@ -3069,11 +3150,11 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
- struct packet_command *cgc,
- int cmd)
+ struct packet_command *cgc,
+ int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
- cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
cgc->cmd[0] = GPCMD_START_STOP_UNIT;
cgc->cmd[1] = 1;
cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
@@ -3082,11 +3163,11 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
- struct packet_command *cgc,
- int cmd)
+ struct packet_command *cgc,
+ int cmd)
{
struct cdrom_device_ops *cdo = cdi->ops;
- cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
cgc->cmd[0] = GPCMD_PAUSE_RESUME;
cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
cgc->data_direction = CGC_DATA_NONE;
@@ -3094,8 +3175,8 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
}
static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
- void __user *arg,
- struct packet_command *cgc)
+ void __user *arg,
+ struct packet_command *cgc)
{
int ret;
dvd_struct *s;
@@ -3108,7 +3189,7 @@ static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
if (!s)
return -ENOMEM;
- cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
+ cd_dbg(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
if (copy_from_user(s, arg, size)) {
kfree(s);
return -EFAULT;
@@ -3126,44 +3207,48 @@ out:
}
static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
- void __user *arg)
+ void __user *arg)
{
int ret;
dvd_authinfo ai;
if (!CDROM_CAN(CDC_DVD))
return -ENOSYS;
- cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
- IOCTL_IN(arg, dvd_authinfo, ai);
+ cd_dbg(CD_DO_IOCTL, "entering DVD_AUTH\n");
+ if (copy_from_user(&ai, (dvd_authinfo __user *)arg, sizeof(ai)))
+ return -EFAULT;
ret = dvd_do_auth(cdi, &ai);
if (ret)
return ret;
- IOCTL_OUT(arg, dvd_authinfo, ai);
+ if (copy_to_user((dvd_authinfo __user *)arg, &ai, sizeof(ai)))
+ return -EFAULT;
return 0;
}
static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
- void __user *arg)
+ void __user *arg)
{
int ret;
long next = 0;
- cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
ret = cdrom_get_next_writable(cdi, &next);
if (ret)
return ret;
- IOCTL_OUT(arg, long, next);
+ if (copy_to_user((long __user *)arg, &next, sizeof(next)))
+ return -EFAULT;
return 0;
}
static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
- void __user *arg)
+ void __user *arg)
{
int ret;
long last = 0;
- cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
ret = cdrom_get_last_written(cdi, &last);
if (ret)
return ret;
- IOCTL_OUT(arg, long, last);
+ if (copy_to_user((long __user *)arg, &last, sizeof(last)))
+ return -EFAULT;
return 0;
}
@@ -3212,181 +3297,101 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
return -ENOTTY;
}
-static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type,
- track_information *ti)
-{
- struct cdrom_device_ops *cdo = cdi->ops;
- struct packet_command cgc;
- int ret, buflen;
-
- init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
- cgc.cmd[1] = type & 3;
- cgc.cmd[4] = (track & 0xff00) >> 8;
- cgc.cmd[5] = track & 0xff;
- cgc.cmd[8] = 8;
- cgc.quiet = 1;
-
- if ((ret = cdo->generic_packet(cdi, &cgc)))
- return ret;
-
- buflen = be16_to_cpu(ti->track_information_length) +
- sizeof(ti->track_information_length);
-
- if (buflen > sizeof(track_information))
- buflen = sizeof(track_information);
-
- cgc.cmd[8] = cgc.buflen = buflen;
- if ((ret = cdo->generic_packet(cdi, &cgc)))
- return ret;
-
- /* return actual fill size */
- return buflen;
-}
-
-/* requires CD R/RW */
-static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di)
+/*
+ * Just about every imaginable ioctl is supported in the Uniform layer
+ * these days.
+ * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
+ */
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
{
- struct cdrom_device_ops *cdo = cdi->ops;
- struct packet_command cgc;
- int ret, buflen;
-
- /* set up command and get the disc info */
- init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DISC_INFO;
- cgc.cmd[8] = cgc.buflen = 2;
- cgc.quiet = 1;
-
- if ((ret = cdo->generic_packet(cdi, &cgc)))
- return ret;
+ void __user *argp = (void __user *)arg;
+ int ret;
- /* not all drives have the same disc_info length, so requeue
- * packet with the length the drive tells us it can supply
+ /*
+ * Try the generic SCSI command ioctl's first.
*/
- buflen = be16_to_cpu(di->disc_information_length) +
- sizeof(di->disc_information_length);
-
- if (buflen > sizeof(disc_information))
- buflen = sizeof(disc_information);
-
- cgc.cmd[8] = cgc.buflen = buflen;
- if ((ret = cdo->generic_packet(cdi, &cgc)))
+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ if (ret != -ENOTTY)
return ret;
- /* return actual fill size */
- return buflen;
-}
-
-/* return the last written block on the CD-R media. this is for the udf
- file system. */
-int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
-{
- struct cdrom_tocentry toc;
- disc_information di;
- track_information ti;
- __u32 last_track;
- int ret = -1, ti_size;
-
- if (!CDROM_CAN(CDC_GENERIC_PACKET))
- goto use_toc;
-
- ret = cdrom_get_disc_info(cdi, &di);
- if (ret < (int)(offsetof(typeof(di), last_track_lsb)
- + sizeof(di.last_track_lsb)))
- goto use_toc;
-
- /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
- if (ti_size < (int)offsetof(typeof(ti), track_start))
- goto use_toc;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- if (last_track==1)
- goto use_toc;
- last_track--;
- ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
- }
-
- if (ti_size < (int)(offsetof(typeof(ti), track_size)
- + sizeof(ti.track_size)))
- goto use_toc;
-
- /* if last recorded field is valid, return it. */
- if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
- + sizeof(ti.last_rec_address))) {
- *last_written = be32_to_cpu(ti.last_rec_address);
- } else {
- /* make it up instead */
- *last_written = be32_to_cpu(ti.track_start) +
- be32_to_cpu(ti.track_size);
- if (ti.free_blocks)
- *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+ switch (cmd) {
+ case CDROMMULTISESSION:
+ return cdrom_ioctl_multisession(cdi, argp);
+ case CDROMEJECT:
+ return cdrom_ioctl_eject(cdi);
+ case CDROMCLOSETRAY:
+ return cdrom_ioctl_closetray(cdi);
+ case CDROMEJECT_SW:
+ return cdrom_ioctl_eject_sw(cdi, arg);
+ case CDROM_MEDIA_CHANGED:
+ return cdrom_ioctl_media_changed(cdi, arg);
+ case CDROM_SET_OPTIONS:
+ return cdrom_ioctl_set_options(cdi, arg);
+ case CDROM_CLEAR_OPTIONS:
+ return cdrom_ioctl_clear_options(cdi, arg);
+ case CDROM_SELECT_SPEED:
+ return cdrom_ioctl_select_speed(cdi, arg);
+ case CDROM_SELECT_DISC:
+ return cdrom_ioctl_select_disc(cdi, arg);
+ case CDROMRESET:
+ return cdrom_ioctl_reset(cdi, bdev);
+ case CDROM_LOCKDOOR:
+ return cdrom_ioctl_lock_door(cdi, arg);
+ case CDROM_DEBUG:
+ return cdrom_ioctl_debug(cdi, arg);
+ case CDROM_GET_CAPABILITY:
+ return cdrom_ioctl_get_capability(cdi);
+ case CDROM_GET_MCN:
+ return cdrom_ioctl_get_mcn(cdi, argp);
+ case CDROM_DRIVE_STATUS:
+ return cdrom_ioctl_drive_status(cdi, arg);
+ case CDROM_DISC_STATUS:
+ return cdrom_ioctl_disc_status(cdi);
+ case CDROM_CHANGER_NSLOTS:
+ return cdrom_ioctl_changer_nslots(cdi);
}
- return 0;
- /* this is where we end up if the drive either can't do a
- GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if
- it doesn't give enough information or fails. then we return
- the toc contents. */
-use_toc:
- toc.cdte_format = CDROM_MSF;
- toc.cdte_track = CDROM_LEADOUT;
- if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
- return ret;
- sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA);
- *last_written = toc.cdte_addr.lba;
- return 0;
-}
-
-/* return the next writable block. also for udf file system. */
-static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable)
-{
- disc_information di;
- track_information ti;
- __u16 last_track;
- int ret, ti_size;
-
- if (!CDROM_CAN(CDC_GENERIC_PACKET))
- goto use_last_written;
-
- ret = cdrom_get_disc_info(cdi, &di);
- if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb)
- + sizeof(di.last_track_lsb))
- goto use_last_written;
-
- /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
- if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
- goto use_last_written;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- if (last_track == 1)
- goto use_last_written;
- last_track--;
- ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
- if (ti_size < 0)
- goto use_last_written;
+ /*
+ * Use the ioctls that are implemented through the generic_packet()
+ * interface. this may look at bit funny, but if -ENOTTY is
+ * returned that particular ioctl is not implemented and we
+ * let it go through the device specific ones.
+ */
+ if (CDROM_CAN(CDC_GENERIC_PACKET)) {
+ ret = mmc_ioctl(cdi, cmd, arg);
+ if (ret != -ENOTTY)
+ return ret;
}
- /* if next recordable address field is valid, use it. */
- if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
- + sizeof(ti.next_writable)) {
- *next_writable = be32_to_cpu(ti.next_writable);
- return 0;
+ /*
+ * Note: most of the cd_dbg() calls are commented out here,
+ * because they fill up the sys log when CD players poll
+ * the drive.
+ */
+ switch (cmd) {
+ case CDROMSUBCHNL:
+ return cdrom_ioctl_get_subchnl(cdi, argp);
+ case CDROMREADTOCHDR:
+ return cdrom_ioctl_read_tochdr(cdi, argp);
+ case CDROMREADTOCENTRY:
+ return cdrom_ioctl_read_tocentry(cdi, argp);
+ case CDROMPLAYMSF:
+ return cdrom_ioctl_play_msf(cdi, argp);
+ case CDROMPLAYTRKIND:
+ return cdrom_ioctl_play_trkind(cdi, argp);
+ case CDROMVOLCTRL:
+ return cdrom_ioctl_volctrl(cdi, argp);
+ case CDROMVOLREAD:
+ return cdrom_ioctl_volread(cdi, argp);
+ case CDROMSTART:
+ case CDROMSTOP:
+ case CDROMPAUSE:
+ case CDROMRESUME:
+ return cdrom_ioctl_audioctl(cdi, cmd);
}
-use_last_written:
- if ((ret = cdrom_get_last_written(cdi, next_writable))) {
- *next_writable = 0;
- return ret;
- } else {
- *next_writable += 7;
- return 0;
- }
+ return -ENOSYS;
}
EXPORT_SYMBOL(cdrom_get_last_written);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 51e75ad96422..584bc3126403 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -602,7 +602,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
spin_unlock(&gdrom_lock);
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
- __raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
+ __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
__raw_writel(1, GDROM_DMA_DIRECTION_REG);
__raw_writel(1, GDROM_DMA_ENABLE_REG);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 974321a2508d..14790304b84b 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -345,7 +345,6 @@ out:
free_irq(apbs[i].irq, &dummy);
iounmap(apbs[i].RamIO);
}
- pci_disable_device(dev);
return ret;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 102c50d38902..06cea7ff3a7c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk)
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
+EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
/*********************************************************************
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 6f56d3a4f010..3a2196481b11 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -30,14 +30,7 @@ config COMMON_CLK_WM831X
Supports the clocking subsystem of the WM831x/2x series of
PMICs from Wolfson Microlectronics.
-config COMMON_CLK_VERSATILE
- bool "Clock driver for ARM Reference designs"
- depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64
- ---help---
- Supports clocking on ARM Reference designs:
- - Integrator/AP and Integrator/CP
- - RealView PB1176, EB, PB11MP and PBX
- - Versatile Express
+source "drivers/clk/versatile/Kconfig"
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77686 MFD"
@@ -115,3 +108,5 @@ endmenu
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/mvebu/Kconfig"
+
+source "drivers/clk/samsung/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 5f8a28735c96..17d7f13d19a5 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
-obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
+obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 46c1d3d0d66b..4998aee59267 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -2,8 +2,8 @@
# Makefile for at91 specific clk
#
-obj-y += pmc.o
-obj-y += clk-main.o clk-pll.o clk-plldiv.o clk-master.o
+obj-y += pmc.o sckc.o
+obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
obj-y += clk-system.o clk-peripheral.o clk-programmable.o
obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 8e9e8cc0412d..733306131b99 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -30,99 +30,546 @@
#define MAINF_LOOP_MIN_WAIT (USEC_PER_SEC / SLOW_CLOCK_FREQ)
#define MAINF_LOOP_MAX_WAIT MAINFRDY_TIMEOUT
-struct clk_main {
+#define MOR_KEY_MASK (0xff << 16)
+
+struct clk_main_osc {
struct clk_hw hw;
struct at91_pmc *pmc;
- unsigned long rate;
unsigned int irq;
wait_queue_head_t wait;
};
-#define to_clk_main(hw) container_of(hw, struct clk_main, hw)
+#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
+
+struct clk_main_rc_osc {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ unsigned long frequency;
+ unsigned long accuracy;
+};
+
+#define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw)
+
+struct clk_rm9200_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw)
-static irqreturn_t clk_main_irq_handler(int irq, void *dev_id)
+struct clk_sam9x5_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 parent;
+};
+
+#define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw)
+
+static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id)
{
- struct clk_main *clkmain = (struct clk_main *)dev_id;
+ struct clk_main_osc *osc = dev_id;
- wake_up(&clkmain->wait);
- disable_irq_nosync(clkmain->irq);
+ wake_up(&osc->wait);
+ disable_irq_nosync(osc->irq);
return IRQ_HANDLED;
}
-static int clk_main_prepare(struct clk_hw *hw)
+static int clk_main_osc_prepare(struct clk_hw *hw)
{
- struct clk_main *clkmain = to_clk_main(hw);
- struct at91_pmc *pmc = clkmain->pmc;
- unsigned long halt_time, timeout;
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
u32 tmp;
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return 0;
+
+ if (!(tmp & AT91_PMC_MOSCEN)) {
+ tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY;
+ pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ }
+
while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
+ enable_irq(osc->irq);
+ wait_event(osc->wait,
pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
}
- if (clkmain->rate)
- return 0;
+ return 0;
+}
+
+static void clk_main_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return;
+
+ if (!(tmp & AT91_PMC_MOSCEN))
+ return;
+
+ tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN);
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+}
+
+static int clk_main_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return 1;
+
+ return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) &&
+ (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN));
+}
+
+static const struct clk_ops main_osc_ops = {
+ .prepare = clk_main_osc_prepare,
+ .unprepare = clk_main_osc_unprepare,
+ .is_prepared = clk_main_osc_is_prepared,
+};
+
+static struct clk * __init
+at91_clk_register_main_osc(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char *parent_name,
+ bool bypass)
+{
+ int ret;
+ struct clk_main_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_osc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->pmc = pmc;
+ osc->irq = irq;
+
+ init_waitqueue_head(&osc->wait);
+ irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
+ ret = request_irq(osc->irq, clk_main_osc_irq_handler,
+ IRQF_TRIGGER_HIGH, name, osc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bypass)
+ pmc_write(pmc, AT91_CKGR_MOR,
+ (pmc_read(pmc, AT91_CKGR_MOR) &
+ ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk)) {
+ free_irq(irq, osc);
+ kfree(osc);
+ }
+
+ return clk;
+}
+
+void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ const char *name = np->name;
+ const char *parent_name;
+ bool bypass;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
+{
+ struct clk_main_rc_osc *osc = dev_id;
+
+ wake_up(&osc->wait);
+ disable_irq_nosync(osc->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_main_rc_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+
+ if (!(tmp & AT91_PMC_MOSCRCEN)) {
+ tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY;
+ pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ }
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) {
+ enable_irq(osc->irq);
+ wait_event(osc->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS);
+ }
+
+ return 0;
+}
+
+static void clk_main_rc_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (!(tmp & AT91_PMC_MOSCRCEN))
+ return;
+
+ tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN);
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+}
+
+static int clk_main_rc_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+
+ return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) &&
+ (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN));
+}
+
+static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ return osc->frequency;
+}
+
+static unsigned long clk_main_rc_osc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_acc)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ return osc->accuracy;
+}
+
+static const struct clk_ops main_rc_osc_ops = {
+ .prepare = clk_main_rc_osc_prepare,
+ .unprepare = clk_main_rc_osc_unprepare,
+ .is_prepared = clk_main_rc_osc_is_prepared,
+ .recalc_rate = clk_main_rc_osc_recalc_rate,
+ .recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
+};
+
+static struct clk * __init
+at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ u32 frequency, u32 accuracy)
+{
+ int ret;
+ struct clk_main_rc_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !frequency)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_rc_osc_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->pmc = pmc;
+ osc->irq = irq;
+ osc->frequency = frequency;
+ osc->accuracy = accuracy;
+
+ init_waitqueue_head(&osc->wait);
+ irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
+ ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
+ IRQF_TRIGGER_HIGH, name, osc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk)) {
+ free_irq(irq, osc);
+ kfree(osc);
+ }
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ const char *name = np->name;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency,
+ accuracy);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+
+static int clk_main_probe_frequency(struct at91_pmc *pmc)
+{
+ unsigned long prep_time, timeout;
+ u32 tmp;
timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
do {
- halt_time = jiffies;
+ prep_time = jiffies;
tmp = pmc_read(pmc, AT91_CKGR_MCFR);
if (tmp & AT91_PMC_MAINRDY)
return 0;
usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
- } while (time_before(halt_time, timeout));
+ } while (time_before(prep_time, timeout));
- return 0;
+ return -ETIMEDOUT;
}
-static int clk_main_is_prepared(struct clk_hw *hw)
+static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
+ unsigned long parent_rate)
{
- struct clk_main *clkmain = to_clk_main(hw);
+ u32 tmp;
+
+ if (parent_rate)
+ return parent_rate;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR);
+ if (!(tmp & AT91_PMC_MAINRDY))
+ return 0;
- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+ return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
}
-static unsigned long clk_main_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static int clk_rm9200_main_prepare(struct clk_hw *hw)
{
- u32 tmp;
- struct clk_main *clkmain = to_clk_main(hw);
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return clk_main_probe_frequency(clkmain->pmc);
+}
+
+static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY);
+}
+
+static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+}
+
+static const struct clk_ops rm9200_main_ops = {
+ .prepare = clk_rm9200_main_prepare,
+ .is_prepared = clk_rm9200_main_is_prepared,
+ .recalc_rate = clk_rm9200_main_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_rm9200_main(struct at91_pmc *pmc,
+ const char *name,
+ const char *parent_name)
+{
+ struct clk_rm9200_main *clkmain;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!parent_name)
+ return ERR_PTR(-EINVAL);
+
+ clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
+ if (!clkmain)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &rm9200_main_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ clkmain->hw.init = &init;
+ clkmain->pmc = pmc;
+
+ clk = clk_register(NULL, &clkmain->hw);
+ if (IS_ERR(clk))
+ kfree(clkmain);
+
+ return clk;
+}
+
+void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_rm9200_main(pmc, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
+{
+ struct clk_sam9x5_main *clkmain = dev_id;
+
+ wake_up(&clkmain->wait);
+ disable_irq_nosync(clkmain->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_sam9x5_main_prepare(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
struct at91_pmc *pmc = clkmain->pmc;
- if (clkmain->rate)
- return clkmain->rate;
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ }
+
+ return clk_main_probe_frequency(pmc);
+}
- tmp = pmc_read(pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINF;
- clkmain->rate = (tmp * parent_rate) / MAINF_DIV;
+static int clk_sam9x5_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- return clkmain->rate;
+ return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
}
-static const struct clk_ops main_ops = {
- .prepare = clk_main_prepare,
- .is_prepared = clk_main_is_prepared,
- .recalc_rate = clk_main_recalc_rate,
+static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+}
+
+static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+ u32 tmp;
+
+ if (index > 1)
+ return -EINVAL;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+
+ if (index && !(tmp & AT91_PMC_MOSCSEL))
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ else if (!index && (tmp & AT91_PMC_MOSCSEL))
+ pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ }
+
+ return 0;
+}
+
+static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN);
+}
+
+static const struct clk_ops sam9x5_main_ops = {
+ .prepare = clk_sam9x5_main_prepare,
+ .is_prepared = clk_sam9x5_main_is_prepared,
+ .recalc_rate = clk_sam9x5_main_recalc_rate,
+ .set_parent = clk_sam9x5_main_set_parent,
+ .get_parent = clk_sam9x5_main_get_parent,
};
static struct clk * __init
-at91_clk_register_main(struct at91_pmc *pmc,
- unsigned int irq,
- const char *name,
- const char *parent_name,
- unsigned long rate)
+at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
{
int ret;
- struct clk_main *clkmain;
+ struct clk_sam9x5_main *clkmain;
struct clk *clk = NULL;
struct clk_init_data init;
if (!pmc || !irq || !name)
return ERR_PTR(-EINVAL);
- if (!rate && !parent_name)
+ if (!parent_names || !num_parents)
return ERR_PTR(-EINVAL);
clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
@@ -130,19 +577,20 @@ at91_clk_register_main(struct at91_pmc *pmc,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &main_ops;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.flags = parent_name ? 0 : CLK_IS_ROOT;
+ init.ops = &sam9x5_main_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_PARENT_GATE;
clkmain->hw.init = &init;
- clkmain->rate = rate;
clkmain->pmc = pmc;
clkmain->irq = irq;
+ clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
+ AT91_PMC_MOSCEN);
init_waitqueue_head(&clkmain->wait);
irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
- ret = request_irq(clkmain->irq, clk_main_irq_handler,
- IRQF_TRIGGER_HIGH, "clk-main", clkmain);
+ ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
+ IRQF_TRIGGER_HIGH, name, clkmain);
if (ret)
return ERR_PTR(ret);
@@ -155,33 +603,36 @@ at91_clk_register_main(struct at91_pmc *pmc,
return clk;
}
-
-
-static void __init
-of_at91_clk_main_setup(struct device_node *np, struct at91_pmc *pmc)
+void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
{
struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
unsigned int irq;
- const char *parent_name;
const char *name = np->name;
- u32 rate = 0;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
- parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &rate);
+
irq = irq_of_parse_and_map(np, 0);
if (!irq)
return;
- clk = at91_clk_register_main(pmc, irq, name, parent_name, rate);
+ clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names,
+ num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-
-void __init of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_main_setup(np, pmc);
-}
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
new file mode 100644
index 000000000000..0300c46ee247
--- /dev/null
+++ b/drivers/clk/at91/clk-slow.c
@@ -0,0 +1,467 @@
+/*
+ * drivers/clk/at91/clk-slow.c
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+#include "sckc.h"
+
+#define SLOW_CLOCK_FREQ 32768
+#define SLOWCK_SW_CYCLES 5
+#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+
+#define AT91_SCKC_CR 0x00
+#define AT91_SCKC_RCEN (1 << 0)
+#define AT91_SCKC_OSC32EN (1 << 1)
+#define AT91_SCKC_OSC32BYP (1 << 2)
+#define AT91_SCKC_OSCSEL (1 << 3)
+
+struct clk_slow_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw)
+
+struct clk_slow_rc_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long frequency;
+ unsigned long accuracy;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw)
+
+struct clk_sam9260_slow {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
+
+struct clk_sam9x5_slow {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ u8 parent;
+};
+
+#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+
+
+static int clk_slow_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return 0;
+
+ writel(tmp | AT91_SCKC_OSC32EN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return;
+
+ writel(tmp & ~AT91_SCKC_OSC32EN, sckcr);
+}
+
+static int clk_slow_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return 1;
+
+ return !!(tmp & AT91_SCKC_OSC32EN);
+}
+
+static const struct clk_ops slow_osc_ops = {
+ .prepare = clk_slow_osc_prepare,
+ .unprepare = clk_slow_osc_unprepare,
+ .is_prepared = clk_slow_osc_is_prepared,
+};
+
+static struct clk * __init
+at91_clk_register_slow_osc(void __iomem *sckcr,
+ const char *name,
+ const char *parent_name,
+ unsigned long startup,
+ bool bypass)
+{
+ struct clk_slow_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_osc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->startup_usec = startup;
+
+ if (bypass)
+ writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP,
+ sckcr);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk))
+ kfree(osc);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 startup;
+ bool bypass;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ clk = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
+ bypass);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->frequency;
+}
+
+static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_acc)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->accuracy;
+}
+
+static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_rc_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr);
+}
+
+static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return !!(readl(osc->sckcr) & AT91_SCKC_RCEN);
+}
+
+static const struct clk_ops slow_rc_osc_ops = {
+ .prepare = clk_slow_rc_osc_prepare,
+ .unprepare = clk_slow_rc_osc_unprepare,
+ .is_prepared = clk_slow_rc_osc_is_prepared,
+ .recalc_rate = clk_slow_rc_osc_recalc_rate,
+ .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy,
+};
+
+static struct clk * __init
+at91_clk_register_slow_rc_osc(void __iomem *sckcr,
+ const char *name,
+ unsigned long frequency,
+ unsigned long accuracy,
+ unsigned long startup)
+{
+ struct clk_slow_rc_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_rc_osc_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->frequency = frequency;
+ osc->accuracy = accuracy;
+ osc->startup_usec = startup;
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk))
+ kfree(osc);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ u32 startup = 0;
+ const char *name = np->name;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+
+ clk = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
+ startup);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+ void __iomem *sckcr = slowck->sckcr;
+ u32 tmp;
+
+ if (index > 1)
+ return -EINVAL;
+
+ tmp = readl(sckcr);
+
+ if ((!index && !(tmp & AT91_SCKC_OSCSEL)) ||
+ (index && (tmp & AT91_SCKC_OSCSEL)))
+ return 0;
+
+ if (index)
+ tmp |= AT91_SCKC_OSCSEL;
+ else
+ tmp &= ~AT91_SCKC_OSCSEL;
+
+ writel(tmp, sckcr);
+
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+
+ return 0;
+}
+
+static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+
+ return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL);
+}
+
+static const struct clk_ops sam9x5_slow_ops = {
+ .set_parent = clk_sam9x5_slow_set_parent,
+ .get_parent = clk_sam9x5_slow_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_slow(void __iomem *sckcr,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ struct clk_sam9x5_slow *slowck;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name || !parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
+ if (!slowck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_slow_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ slowck->hw.init = &init;
+ slowck->sckcr = sckcr;
+ slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL);
+
+ clk = clk_register(NULL, &slowck->hw);
+ if (IS_ERR(clk))
+ kfree(slowck);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
+ const char *name = np->name;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
+
+ return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL);
+}
+
+static const struct clk_ops sam9260_slow_ops = {
+ .get_parent = clk_sam9260_slow_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ struct clk_sam9260_slow *slowck;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
+ if (!slowck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9260_slow_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ slowck->hw.init = &init;
+ slowck->pmc = pmc;
+
+ clk = clk_register(NULL, &slowck->hw);
+ if (IS_ERR(clk))
+ kfree(slowck);
+
+ return clk;
+}
+
+void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
+ const char *name = np->name;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+	if (num_parents != 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_sam9260_slow(pmc, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
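
For context on how these provider registrations get consumed: a driver using the resulting slow clock goes through the generic clk API, and clk_prepare_enable() lands in the .prepare callbacks above (oscillator enable bit plus the startup delay). A minimal consumer sketch; the function and its device binding are hypothetical, not part of this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer; assumes the device's DT node references the slow clock. */
static int example_use_slow_clk(struct device *dev)
{
	struct clk *slow = devm_clk_get(dev, NULL);	/* first entry in "clocks" */
	int ret;

	if (IS_ERR(slow))
		return PTR_ERR(slow);

	/* Reaches .prepare: sets the oscillator enable bit, sleeps the startup time. */
	ret = clk_prepare_enable(slow);
	if (ret)
		return ret;

	dev_info(dev, "slow clock running at %lu Hz\n", clk_get_rate(slow));
	return 0;
}
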
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 6a61477a57e0..524196bb35a5 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -229,11 +229,28 @@ out_free_pmc:
}
static const struct of_device_id pmc_clk_ids[] __initconst = {
+ /* Slow oscillator */
+ {
+ .compatible = "atmel,at91sam9260-clk-slow",
+ .data = of_at91sam9260_clk_slow_setup,
+ },
/* Main clock */
{
+ .compatible = "atmel,at91rm9200-clk-main-osc",
+ .data = of_at91rm9200_clk_main_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-main-rc-osc",
+ .data = of_at91sam9x5_clk_main_rc_osc_setup,
+ },
+ {
.compatible = "atmel,at91rm9200-clk-main",
.data = of_at91rm9200_clk_main_setup,
},
+ {
+ .compatible = "atmel,at91sam9x5-clk-main",
+ .data = of_at91sam9x5_clk_main_setup,
+ },
/* PLL clocks */
{
.compatible = "atmel,at91rm9200-clk-pll",
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 441350983ccb..6c7625976113 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -58,8 +58,17 @@ static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
+extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
struct at91_pmc *pmc);
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
new file mode 100644
index 000000000000..1184d76a7ab7
--- /dev/null
+++ b/drivers/clk/at91/sckc.c
@@ -0,0 +1,57 @@
+/*
+ * drivers/clk/at91/sckc.c
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "sckc.h"
+
+static const struct of_device_id sckc_clk_ids[] __initconst = {
+ /* Slow clock */
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow-osc",
+ .data = of_at91sam9x5_clk_slow_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow-rc-osc",
+ .data = of_at91sam9x5_clk_slow_rc_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow",
+ .data = of_at91sam9x5_clk_slow_setup,
+ },
+	{ /* sentinel */ }
+};
+
+static void __init of_at91sam9x5_sckc_setup(struct device_node *np)
+{
+ struct device_node *childnp;
+ void (*clk_setup)(struct device_node *, void __iomem *);
+ const struct of_device_id *clk_id;
+ void __iomem *regbase = of_iomap(np, 0);
+
+ if (!regbase)
+ return;
+
+ for_each_child_of_node(np, childnp) {
+ clk_id = of_match_node(sckc_clk_ids, childnp);
+ if (!clk_id)
+ continue;
+ clk_setup = clk_id->data;
+ clk_setup(childnp, regbase);
+ }
+}
+CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc",
+ of_at91sam9x5_sckc_setup);
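
The setup above is table-driven: every child node of the SCKC is matched against sckc_clk_ids by compatible string, and the matching callback receives the child node plus the shared SCKC register base. Supporting another slow-clock variant would only take one more table entry; a sketch, where both the compatible string and the callback are hypothetical:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>

/* Hypothetical; illustrates the dispatch pattern only. */
static void __init of_example_clk_slow_xtal_setup(struct device_node *np,
						  void __iomem *sckcr)
{
	/* Parse DT properties, register the clk and add a provider,
	 * as the of_at91sam9x5_clk_slow_*_setup() helpers do. */
}

static const struct of_device_id example_sckc_clk_ids[] __initconst = {
	{
		.compatible = "atmel,example-clk-slow-xtal",
		.data = of_example_clk_slow_xtal_setup,
	},
	{ /* sentinel */ }
};
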
diff --git a/drivers/clk/at91/sckc.h b/drivers/clk/at91/sckc.h
new file mode 100644
index 000000000000..836fcf59820f
--- /dev/null
+++ b/drivers/clk/at91/sckc.h
@@ -0,0 +1,22 @@
+/*
+ * drivers/clk/at91/sckc.h
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __AT91_SCKC_H_
+#define __AT91_SCKC_H_
+
+extern void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
+ void __iomem *sckcr);
+extern void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
+ void __iomem *sckcr);
+extern void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
+ void __iomem *sckcr);
+
+#endif /* __AT91_SCKC_H_ */
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 4637697c139f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
static int _round_up_table(const struct clk_div_table *table, int div)
{
const struct clk_div_table *clkt;
- int up = _get_table_maxdiv(table);
+ int up = INT_MAX;
for (clkt = table; clkt->div; clkt++) {
if (clkt->div == div)
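
The clk-divider change above fixes the round-up search: `up` was previously seeded with the largest divider in the table, so when the requested divider exceeded every table entry the function returned that maximum, a divider smaller than requested. Seeding with INT_MAX keeps the no-match case unambiguous. A self-contained sketch of the corrected logic (the kernel operates on struct clk_div_table; this mirrors only the search):

#include <limits.h>

struct div_entry { unsigned int div; };	/* div == 0 terminates the table */

static int round_up_table(const struct div_entry *table, int div)
{
	const struct div_entry *e;
	int up = INT_MAX;	/* was: largest divider in the table */

	for (e = table; e->div; e++) {
		if ((int)e->div == div)
			return div;	/* exact match */
		if ((int)e->div < div)
			continue;	/* too small, keep scanning */
		if ((int)e->div < up)
			up = e->div;	/* smallest divider above div so far */
	}

	return up;	/* INT_MAX when no entry is >= div */
}
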
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
new file mode 100644
index 000000000000..84196ecdaa12
--- /dev/null
+++ b/drivers/clk/samsung/Kconfig
@@ -0,0 +1,26 @@
+config COMMON_CLK_SAMSUNG
+ bool
+ select COMMON_CLK
+
+config S3C2410_COMMON_CLK
+ bool
+ select COMMON_CLK_SAMSUNG
+ help
+ Build the s3c2410 clock driver based on the common clock framework.
+
+config S3C2410_COMMON_DCLK
+ bool
+ select COMMON_CLK_SAMSUNG
+ select REGMAP_MMIO
+ help
+ Temporary symbol to build the dclk driver based on the common clock
+ framework.
+
+config S3C2412_COMMON_CLK
+ bool
+ select COMMON_CLK_SAMSUNG
+
+config S3C2443_COMMON_CLK
+ bool
+ select COMMON_CLK_SAMSUNG
+
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 8eb4799237f0..69e81773164e 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -3,9 +3,16 @@
#
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
+obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
+obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
+obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
+obj-$(CONFIG_S3C2410_COMMON_CLK) += clk-s3c2410.o
+obj-$(CONFIG_S3C2410_COMMON_DCLK) += clk-s3c2410-dclk.o
+obj-$(CONFIG_S3C2412_COMMON_CLK) += clk-s3c2412.o
+obj-$(CONFIG_S3C2443_COMMON_CLK) += clk-s3c2443.o
obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
new file mode 100644
index 000000000000..7a17bd40d1dd
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos3250 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/exynos3250.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define SRC_LEFTBUS 0x4200
+#define DIV_LEFTBUS 0x4500
+#define GATE_IP_LEFTBUS 0x4800
+#define SRC_RIGHTBUS 0x8200
+#define DIV_RIGHTBUS 0x8500
+#define GATE_IP_RIGHTBUS 0x8800
+#define GATE_IP_PERIR 0x8960
+#define MPLL_LOCK 0xc010
+#define MPLL_CON0 0xc110
+#define VPLL_LOCK 0xc020
+#define VPLL_CON0 0xc120
+#define UPLL_LOCK 0xc030
+#define UPLL_CON0 0xc130
+#define SRC_TOP0 0xc210
+#define SRC_TOP1 0xc214
+#define SRC_CAM 0xc220
+#define SRC_MFC 0xc228
+#define SRC_G3D 0xc22c
+#define SRC_LCD 0xc234
+#define SRC_ISP 0xc238
+#define SRC_FSYS 0xc240
+#define SRC_PERIL0 0xc250
+#define SRC_PERIL1 0xc254
+#define SRC_MASK_TOP 0xc310
+#define SRC_MASK_CAM 0xc320
+#define SRC_MASK_LCD 0xc334
+#define SRC_MASK_ISP 0xc338
+#define SRC_MASK_FSYS 0xc340
+#define SRC_MASK_PERIL0 0xc350
+#define SRC_MASK_PERIL1 0xc354
+#define DIV_TOP 0xc510
+#define DIV_CAM 0xc520
+#define DIV_MFC 0xc528
+#define DIV_G3D 0xc52c
+#define DIV_LCD 0xc534
+#define DIV_ISP 0xc538
+#define DIV_FSYS0 0xc540
+#define DIV_FSYS1 0xc544
+#define DIV_FSYS2 0xc548
+#define DIV_PERIL0 0xc550
+#define DIV_PERIL1 0xc554
+#define DIV_PERIL3 0xc55c
+#define DIV_PERIL4 0xc560
+#define DIV_PERIL5 0xc564
+#define DIV_CAM1 0xc568
+#define CLKDIV2_RATIO 0xc580
+#define GATE_SCLK_CAM 0xc820
+#define GATE_SCLK_MFC 0xc828
+#define GATE_SCLK_G3D 0xc82c
+#define GATE_SCLK_LCD 0xc834
+#define GATE_SCLK_ISP_TOP 0xc838
+#define GATE_SCLK_FSYS 0xc840
+#define GATE_SCLK_PERIL 0xc850
+#define GATE_IP_CAM 0xc920
+#define GATE_IP_MFC 0xc928
+#define GATE_IP_G3D 0xc92c
+#define GATE_IP_LCD 0xc934
+#define GATE_IP_ISP 0xc938
+#define GATE_IP_FSYS 0xc940
+#define GATE_IP_PERIL 0xc950
+#define GATE_BLOCK 0xc970
+#define APLL_LOCK 0x14000
+#define APLL_CON0 0x14100
+#define SRC_CPU 0x14200
+#define DIV_CPU0 0x14500
+#define DIV_CPU1 0x14504
+
+/* list of PLLs to be registered */
+enum exynos3250_plls {
+ apll, mpll, vpll, upll,
+ nr_plls
+};
+
+static void __iomem *reg_base;
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos3250_clk_regs;
+
+static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
+ SRC_LEFTBUS,
+ DIV_LEFTBUS,
+ GATE_IP_LEFTBUS,
+ SRC_RIGHTBUS,
+ DIV_RIGHTBUS,
+ GATE_IP_RIGHTBUS,
+ GATE_IP_PERIR,
+ MPLL_LOCK,
+ MPLL_CON0,
+ VPLL_LOCK,
+ VPLL_CON0,
+ UPLL_LOCK,
+ UPLL_CON0,
+ SRC_TOP0,
+ SRC_TOP1,
+ SRC_CAM,
+ SRC_MFC,
+ SRC_G3D,
+ SRC_LCD,
+ SRC_ISP,
+ SRC_FSYS,
+ SRC_PERIL0,
+ SRC_PERIL1,
+ SRC_MASK_TOP,
+ SRC_MASK_CAM,
+ SRC_MASK_LCD,
+ SRC_MASK_ISP,
+ SRC_MASK_FSYS,
+ SRC_MASK_PERIL0,
+ SRC_MASK_PERIL1,
+ DIV_TOP,
+ DIV_CAM,
+ DIV_MFC,
+ DIV_G3D,
+ DIV_LCD,
+ DIV_ISP,
+ DIV_FSYS0,
+ DIV_FSYS1,
+ DIV_FSYS2,
+ DIV_PERIL0,
+ DIV_PERIL1,
+ DIV_PERIL3,
+ DIV_PERIL4,
+ DIV_PERIL5,
+ DIV_CAM1,
+ CLKDIV2_RATIO,
+ GATE_SCLK_CAM,
+ GATE_SCLK_MFC,
+ GATE_SCLK_G3D,
+ GATE_SCLK_LCD,
+ GATE_SCLK_ISP_TOP,
+ GATE_SCLK_FSYS,
+ GATE_SCLK_PERIL,
+ GATE_IP_CAM,
+ GATE_IP_MFC,
+ GATE_IP_G3D,
+ GATE_IP_LCD,
+ GATE_IP_ISP,
+ GATE_IP_FSYS,
+ GATE_IP_PERIL,
+ GATE_BLOCK,
+ APLL_LOCK,
+ SRC_CPU,
+ DIV_CPU0,
+ DIV_CPU1,
+};
+
+static int exynos3250_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, exynos3250_clk_regs,
+ ARRAY_SIZE(exynos3250_cmu_clk_regs));
+ return 0;
+}
+
+static void exynos3250_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, exynos3250_clk_regs,
+ ARRAY_SIZE(exynos3250_cmu_clk_regs));
+}
+
+static struct syscore_ops exynos3250_clk_syscore_ops = {
+ .suspend = exynos3250_clk_suspend,
+ .resume = exynos3250_clk_resume,
+};
+
+static void exynos3250_clk_sleep_init(void)
+{
+ exynos3250_clk_regs =
+ samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
+ ARRAY_SIZE(exynos3250_cmu_clk_regs));
+ if (!exynos3250_clk_regs) {
+ pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+ goto err;
+ }
+
+ register_syscore_ops(&exynos3250_clk_syscore_ops);
+ return;
+err:
+ kfree(exynos3250_clk_regs);
+}
+#else
+static inline void exynos3250_clk_sleep_init(void) { }
+#endif
+
+/* list of all parent clocks */
+PNAME(mout_vpllsrc_p) = { "fin_pll", };
+
+PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
+PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
+PNAME(mout_vpll_p) = { "fin_pll", "fout_vpll", };
+PNAME(mout_upll_p) = { "fin_pll", "fout_upll", };
+
+PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", };
+PNAME(mout_epll_user_p) = { "fin_pll", "mout_epll", };
+PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", };
+PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", };
+
+PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", };
+PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_vpll", };
+
+PNAME(mout_gdl_p) = { "mout_mpll_user_l", };
+PNAME(mout_gdr_p) = { "mout_mpll_user_r", };
+
+PNAME(mout_aclk_400_mcuisp_sub_p)
+ = { "fin_pll", "div_aclk_400_mcuisp", };
+PNAME(mout_aclk_266_0_p) = { "div_mpll_pre", "mout_vpll", };
+PNAME(mout_aclk_266_1_p) = { "mout_epll_user", };
+PNAME(mout_aclk_266_p) = { "mout_aclk_266_0", "mout_aclk_266_1", };
+PNAME(mout_aclk_266_sub_p) = { "fin_pll", "div_aclk_266", };
+
+PNAME(group_div_mpll_pre_p) = { "div_mpll_pre", };
+PNAME(group_epll_vpll_p) = { "mout_epll_user", "mout_vpll" };
+PNAME(group_sclk_p) = { "xxti", "xusbxti",
+ "none", "none",
+ "none", "none", "div_mpll_pre",
+ "mout_epll_user", "mout_vpll", };
+PNAME(group_sclk_audio_p) = { "audiocdclk", "none",
+ "none", "none",
+ "xxti", "xusbxti",
+ "div_mpll_pre", "mout_epll_user",
+ "mout_vpll", };
+PNAME(group_sclk_cam_blk_p) = { "xxti", "xusbxti",
+ "none", "none", "none",
+ "none", "div_mpll_pre",
+ "mout_epll_user", "mout_vpll",
+ "div_cam_blk_320", };
+PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti",
+ "m_bitclkhsdiv4_2l", "none",
+ "none", "none", "div_mpll_pre",
+ "mout_epll_user", "mout_vpll",
+ "none", "none", "none",
+ "div_lcd_blk_145", };
+
+PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" };
+PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" };
+
+static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "sclk_mpll_1600", "mout_mpll", 1, 1, 0),
+ FFACTOR(0, "sclk_mpll_mif", "mout_mpll", 1, 2, 0),
+ FFACTOR(0, "sclk_bpll", "fout_bpll", 1, 2, 0),
+ FFACTOR(0, "div_cam_blk_320", "sclk_mpll_1600", 1, 5, 0),
+ FFACTOR(0, "div_lcd_blk_145", "sclk_mpll_1600", 1, 11, 0),
+
+ /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
+ FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
+};
+
+static struct samsung_mux_clock mux_clks[] __initdata = {
+ /*
+ * NOTE: Following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* SRC_LEFTBUS */
+ MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p,
+ SRC_LEFTBUS, 4, 1),
+ MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1),
+
+ /* SRC_RIGHTBUS */
+ MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p,
+ SRC_RIGHTBUS, 4, 1),
+ MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1),
+
+ /* SRC_TOP0 */
+ MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
+	MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p, SRC_TOP0, 24, 1),
+ MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_div_mpll_pre_p, SRC_TOP0, 20, 1),
+ MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_div_mpll_pre_p, SRC_TOP0, 16, 1),
+ MUX(CLK_MOUT_ACLK_266_1, "mout_aclk_266_1", mout_aclk_266_1_p, SRC_TOP0, 14, 1),
+ MUX(CLK_MOUT_ACLK_266_0, "mout_aclk_266_0", mout_aclk_266_0_p, SRC_TOP0, 13, 1),
+ MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p, SRC_TOP0, 12, 1),
+ MUX(CLK_MOUT_VPLL, "mout_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_EPLL_USER, "mout_epll_user", mout_epll_user_p, SRC_TOP0, 4, 1),
+ MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1),
+
+ /* SRC_TOP1 */
+ MUX(CLK_MOUT_UPLL, "mout_upll", mout_upll_p, SRC_TOP1, 28, 1),
+ MUX(CLK_MOUT_ACLK_400_MCUISP_SUB, "mout_aclk_400_mcuisp_sub", mout_aclk_400_mcuisp_sub_p,
+ SRC_TOP1, 24, 1),
+ MUX(CLK_MOUT_ACLK_266_SUB, "mout_aclk_266_sub", mout_aclk_266_sub_p, SRC_TOP1, 20, 1),
+ MUX(CLK_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_TOP1, 12, 1),
+ MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp", group_div_mpll_pre_p, SRC_TOP1, 8, 1),
+ MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+
+ /* SRC_CAM */
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group_sclk_p, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_CAM_BLK, "mout_cam_blk", group_sclk_cam_blk_p, SRC_CAM, 0, 4),
+
+ /* SRC_MFC */
+ MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+ MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_vpll_p, SRC_MFC, 4, 1),
+ MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_div_mpll_pre_p, SRC_MFC, 0, 1),
+
+ /* SRC_G3D */
+ MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1),
+ MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_vpll_p, SRC_G3D, 4, 1),
+ MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_div_mpll_pre_p, SRC_G3D, 0, 1),
+
+ /* SRC_LCD */
+ MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_sclk_p, SRC_LCD, 12, 4),
+ MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4),
+
+ /* SRC_ISP */
+ MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_sclk_p, SRC_ISP, 12, 4),
+ MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_sclk_p, SRC_ISP, 8, 4),
+ MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_sclk_p, SRC_ISP, 4, 4),
+
+ /* SRC_FSYS */
+ MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
+ MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 3),
+ MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 3),
+
+ /* SRC_PERIL0 */
+ MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
+ MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
+
+ /* SRC_PERIL1 */
+ MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4),
+ MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4),
+ MUX(CLK_MOUT_AUDIO, "mout_audio", group_sclk_audio_p, SRC_PERIL1, 4, 4),
+
+ /* SRC_CPU */
+ MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
+ SRC_CPU, 24, 1),
+ MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
+ MUX(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1),
+ MUX(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+};
+
+static struct samsung_div_clock div_clks[] __initdata = {
+ /*
+ * NOTE: Following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* DIV_LEFTBUS */
+ DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
+ DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4),
+
+ /* DIV_RIGHTBUS */
+ DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
+ DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4),
+
+ /* DIV_TOP */
+ DIV(CLK_DIV_MPLL_PRE, "div_mpll_pre", "sclk_mpll_mif", DIV_TOP, 28, 2),
+ DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp",
+ "mout_aclk_400_mcuisp", DIV_TOP, 24, 3),
+ DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3),
+ DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3),
+ DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3),
+ DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4),
+ DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3),
+
+ /* DIV_CAM */
+ DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+ DIV(CLK_DIV_CAM_BLK, "div_cam_blk", "mout_cam_blk", DIV_CAM, 0, 4),
+
+ /* DIV_MFC */
+ DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4),
+
+ /* DIV_G3D */
+ DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4),
+
+ /* DIV_LCD */
+ DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4),
+ DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4),
+
+ /* DIV_ISP */
+ DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4),
+ DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp",
+ DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
+ DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
+ DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 0, 4),
+
+ /* DIV_FSYS0 */
+ DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4),
+
+ /* DIV_FSYS1 */
+ DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+
+ /* DIV_PERIL0 */
+ DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+ DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+
+ /* DIV_PERIL1 */
+ DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+ DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+
+ /* DIV_PERIL4 */
+ DIV(CLK_DIV_PCM, "div_pcm", "div_audio", DIV_PERIL4, 20, 8),
+ DIV(CLK_DIV_AUDIO, "div_audio", "mout_audio", DIV_PERIL4, 16, 4),
+
+ /* DIV_PERIL5 */
+ DIV(CLK_DIV_I2S, "div_i2s", "div_audio", DIV_PERIL5, 8, 6),
+
+ /* DIV_CPU0 */
+ DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3),
+ DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3),
+ DIV(CLK_DIV_COREM, "div_corem", "div_core2", DIV_CPU0, 4, 3),
+ DIV(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3),
+
+ /* DIV_CPU1 */
+ DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
+ DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
+};
+
+static struct samsung_gate_clock gate_clks[] __initdata = {
+ /*
+ * NOTE: Following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* GATE_IP_LEFTBUS */
+ GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_IP_RIGHTBUS */
+ GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100", GATE_IP_RIGHTBUS, 2,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100", GATE_IP_RIGHTBUS, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100", GATE_IP_RIGHTBUS, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_IP_PERIR */
+ GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100",
+ GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100",
+ GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100",
+ GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0),
+ GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0),
+ GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0),
+ GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0),
+ GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_SCLK_CAM */
+ GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_cam_blk",
+ GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_M2MSCALER, "sclk_m2mscaler", "div_cam_blk",
+ GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_GSCALER1, "sclk_gscaler1", "div_cam_blk",
+ GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_GSCALER0, "sclk_gscaler0", "div_cam_blk",
+ GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_MFC */
+ GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc",
+ GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_G3D */
+ GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d",
+ GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_LCD */
+ GATE(CLK_SCLK_MIPIDPHY2L, "sclk_mipidphy2l", "div_mipi0",
+ GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre",
+ GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0",
+ GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_ISP_TOP */
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
+ GATE_SCLK_ISP_TOP, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
+ GATE_SCLK_ISP_TOP, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp",
+ GATE_SCLK_ISP_TOP, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp",
+ GATE_SCLK_ISP_TOP, 1, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_FSYS */
+ GATE(CLK_SCLK_UPLL, "sclk_upll", "mout_upll", GATE_SCLK_FSYS, 10, 0, 0),
+ GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre",
+ GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
+ GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
+ GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
+ GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_PERIL */
+ GATE(CLK_SCLK_I2S, "sclk_i2s", "div_i2s",
+ GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PCM, "sclk_pcm", "div_pcm",
+ GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre",
+ GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
+ GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
+ GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
+ GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_IP_CAM */
+ GATE(CLK_QEJPEG, "qejpeg", "div_cam_blk_320", GATE_IP_CAM, 19,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_cam_blk_320",
+ GATE_IP_CAM, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_cam_blk_320",
+ GATE_IP_CAM, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUCAMIF, "ppmucamif", "div_cam_blk_320",
+ GATE_IP_CAM, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QEM2MSCALER, "qem2mscaler", "div_cam_blk_320",
+ GATE_IP_CAM, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QEGSCALER1, "qegscaler1", "div_cam_blk_320",
+ GATE_IP_CAM, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QEGSCALER0, "qegscaler0", "div_cam_blk_320",
+ GATE_IP_CAM, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUJPEG, "smmujpeg", "div_cam_blk_320",
+ GATE_IP_CAM, 11, 0, 0),
+ GATE(CLK_SMMUM2M2SCALER, "smmum2m2scaler", "div_cam_blk_320",
+ GATE_IP_CAM, 9, 0, 0),
+ GATE(CLK_SMMUGSCALER1, "smmugscaler1", "div_cam_blk_320",
+ GATE_IP_CAM, 8, 0, 0),
+ GATE(CLK_SMMUGSCALER0, "smmugscaler0", "div_cam_blk_320",
+ GATE_IP_CAM, 7, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "div_cam_blk_320", GATE_IP_CAM, 6, 0, 0),
+ GATE(CLK_M2MSCALER, "m2mscaler", "div_cam_blk_320",
+ GATE_IP_CAM, 2, 0, 0),
+ GATE(CLK_GSCALER1, "gscaler1", "div_cam_blk_320", GATE_IP_CAM, 1, 0, 0),
+ GATE(CLK_GSCALER0, "gscaler0", "div_cam_blk_320", GATE_IP_CAM, 0, 0, 0),
+
+ /* GATE_IP_MFC */
+ GATE(CLK_QEMFC, "qemfc", "div_aclk_200", GATE_IP_MFC, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0),
+ GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0),
+
+ /* GATE_IP_G3D */
+ GATE(CLK_SMMUG3D, "smmug3d", "div_aclk_200", GATE_IP_G3D, 3, 0, 0),
+ GATE(CLK_QEG3D, "qeg3d", "div_aclk_200", GATE_IP_G3D, 2,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0),
+
+ /* GATE_IP_LCD */
+ GATE(CLK_QE_CH1_LCD, "qe_ch1_lcd", "div_aclk_160", GATE_IP_LCD, 7,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_CH0_LCD, "qe_ch0_lcd", "div_aclk_160", GATE_IP_LCD, 6,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0),
+ GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0),
+ GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0),
+ GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0),
+
+ /* GATE_IP_ISP */
+ GATE(CLK_CAM1, "cam1", "mout_aclk_266_sub", GATE_IP_ISP, 5, 0, 0),
+ GATE(CLK_UART_ISP_TOP, "uart_isp_top", "mout_aclk_266_sub",
+ GATE_IP_ISP, 3, 0, 0),
+ GATE(CLK_SPI1_ISP_TOP, "spi1_isp_top", "mout_aclk_266_sub",
+ GATE_IP_ISP, 2, 0, 0),
+ GATE(CLK_SPI0_ISP_TOP, "spi0_isp_top", "mout_aclk_266_sub",
+ GATE_IP_ISP, 1, 0, 0),
+
+ /* GATE_IP_FSYS */
+ GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0),
+ GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_USBOTG, "usbotg", "div_aclk_200", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
+ GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
+ GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0),
+
+ /* GATE_IP_PERIL */
+ GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(CLK_PCM, "pcm", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0),
+ GATE(CLK_I2S, "i2s", "div_aclk_100", GATE_IP_PERIL, 21, 0, 0),
+ GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0),
+ GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
+ GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
+ GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
+};
+
+/* APLL & MPLL & BPLL & UPLL */
+static struct samsung_pll_rate_table exynos3250_pll_rates[] = {
+ PLL_35XX_RATE(1200000000, 400, 4, 1),
+ PLL_35XX_RATE(1100000000, 275, 3, 1),
+ PLL_35XX_RATE(1066000000, 533, 6, 1),
+ PLL_35XX_RATE(1000000000, 250, 3, 1),
+ PLL_35XX_RATE( 960000000, 320, 4, 1),
+ PLL_35XX_RATE( 900000000, 300, 4, 1),
+ PLL_35XX_RATE( 850000000, 425, 6, 1),
+ PLL_35XX_RATE( 800000000, 200, 3, 1),
+ PLL_35XX_RATE( 700000000, 175, 3, 1),
+ PLL_35XX_RATE( 667000000, 667, 12, 1),
+ PLL_35XX_RATE( 600000000, 400, 4, 2),
+ PLL_35XX_RATE( 533000000, 533, 6, 2),
+ PLL_35XX_RATE( 520000000, 260, 3, 2),
+ PLL_35XX_RATE( 500000000, 250, 3, 2),
+ PLL_35XX_RATE( 400000000, 200, 3, 2),
+ PLL_35XX_RATE( 200000000, 200, 3, 3),
+ PLL_35XX_RATE( 100000000, 200, 3, 4),
+ { /* sentinel */ }
+};
+
+/* VPLL */
+static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
+ PLL_36XX_RATE(600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(533000000, 266, 3, 2, 32768),
+ PLL_36XX_RATE(519230987, 173, 2, 2, 5046),
+ PLL_36XX_RATE(500000000, 250, 3, 2, 0),
+ PLL_36XX_RATE(445500000, 148, 2, 2, 32768),
+ PLL_36XX_RATE(445055007, 148, 2, 2, 23047),
+ PLL_36XX_RATE(400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(371250000, 123, 2, 2, 49152),
+ PLL_36XX_RATE(370878997, 185, 3, 2, 28803),
+ PLL_36XX_RATE(340000000, 170, 3, 2, 0),
+ PLL_36XX_RATE(335000015, 111, 2, 2, 43691),
+ PLL_36XX_RATE(333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(330000000, 110, 2, 2, 0),
+ PLL_36XX_RATE(320000015, 106, 2, 2, 43691),
+ PLL_36XX_RATE(300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(275000000, 275, 3, 3, 0),
+ PLL_36XX_RATE(222750000, 148, 2, 3, 32768),
+ PLL_36XX_RATE(222528007, 148, 2, 3, 23069),
+ PLL_36XX_RATE(160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(148500000, 99, 2, 3, 0),
+ PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
+ PLL_36XX_RATE(108000000, 144, 2, 4, 0),
+ PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
+ PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
+ PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
+ PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON0, NULL),
+ [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+ VPLL_LOCK, VPLL_CON0, NULL),
+ [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+ UPLL_LOCK, UPLL_CON0, NULL),
+};
+
+static void __init exynos3250_cmu_init(struct device_node *np)
+{
+ struct samsung_clk_provider *ctx;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
+ exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
+ exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
+ exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
+
+ samsung_clk_register_pll(ctx, exynos3250_plls,
+ ARRAY_SIZE(exynos3250_plls), reg_base);
+
+ samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
+ samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
+ samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
+
+ exynos3250_clk_sleep_init();
+}
+CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
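
A note on reading the PLL rate tables above: for these 35xx-type PLLs the output conventionally follows fout = fin * MDIV / (PDIV * 2^SDIV), with fin = 24 MHz here (per the fin_pll hack earlier in this file, an assumption until detection is implemented). For example, PLL_35XX_RATE(400000000, 200, 3, 2) works out to 24 MHz * 200 / (3 * 4) = 400 MHz. A throwaway userspace check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned long long fin = 24000000ULL;	/* assumed fin_pll rate */
	unsigned int m = 200, p = 3, s = 2;	/* PLL_35XX_RATE(400000000, 200, 3, 2) */

	assert(fin * m / (p * (1ULL << s)) == 400000000ULL);
	return 0;
}
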
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index b4f967210175..c4df294bb7fb 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -428,7 +428,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
@@ -903,7 +903,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
- GATE(CLK_MDMA2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_MDMA, "mdma", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0,
0),
GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
@@ -1043,7 +1043,7 @@ static unsigned long exynos4_get_xom(void)
return xom;
}
-static void __init exynos4_clk_register_finpll(void)
+static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
{
struct samsung_fixed_rate_clock fclk;
struct clk *clk;
@@ -1066,7 +1066,7 @@ static void __init exynos4_clk_register_finpll(void)
fclk.parent_name = NULL;
fclk.flags = CLK_IS_ROOT;
fclk.fixed_rate = finpll_f;
- samsung_clk_register_fixed_rate(&fclk, 1);
+ samsung_clk_register_fixed_rate(ctx, &fclk, 1);
}
@@ -1176,22 +1176,25 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
{
+ struct samsung_clk_provider *ctx;
exynos4_soc = soc;
reg_base = of_iomap(np, 0);
if (!reg_base)
panic("%s: failed to map registers\n", __func__);
- samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
- samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
+ samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
ext_clk_match);
- exynos4_clk_register_finpll();
+ exynos4_clk_register_finpll(ctx);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_mux(exynos4210_mux_early,
+ samsung_clk_register_mux(ctx, exynos4210_mux_early,
ARRAY_SIZE(exynos4210_mux_early));
if (_get_rate("fin_pll") == 24000000) {
@@ -1205,7 +1208,7 @@ static void __init exynos4_clk_init(struct device_node *np,
exynos4210_plls[vpll].rate_table =
exynos4210_vpll_rates;
- samsung_clk_register_pll(exynos4210_plls,
+ samsung_clk_register_pll(ctx, exynos4210_plls,
ARRAY_SIZE(exynos4210_plls), reg_base);
} else {
if (_get_rate("fin_pll") == 24000000) {
@@ -1217,42 +1220,42 @@ static void __init exynos4_clk_init(struct device_node *np,
exynos4x12_vpll_rates;
}
- samsung_clk_register_pll(exynos4x12_plls,
+ samsung_clk_register_pll(ctx, exynos4x12_plls,
ARRAY_SIZE(exynos4x12_plls), reg_base);
}
- samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
+ samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(exynos4_mux_clks,
+ samsung_clk_register_mux(ctx, exynos4_mux_clks,
ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(exynos4_div_clks,
+ samsung_clk_register_div(ctx, exynos4_div_clks,
ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(exynos4_gate_clks,
+ samsung_clk_register_gate(ctx, exynos4_gate_clks,
ARRAY_SIZE(exynos4_gate_clks));
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(exynos4210_fixed_rate_clks,
+ samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(exynos4210_mux_clks,
+ samsung_clk_register_mux(ctx, exynos4210_mux_clks,
ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(exynos4210_div_clks,
+ samsung_clk_register_div(ctx, exynos4210_div_clks,
ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(exynos4210_gate_clks,
+ samsung_clk_register_gate(ctx, exynos4210_gate_clks,
ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_alias(exynos4210_aliases,
+ samsung_clk_register_alias(ctx, exynos4210_aliases,
ARRAY_SIZE(exynos4210_aliases));
} else {
- samsung_clk_register_mux(exynos4x12_mux_clks,
+ samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(exynos4x12_div_clks,
+ samsung_clk_register_div(ctx, exynos4x12_div_clks,
ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(exynos4x12_gate_clks,
+ samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_alias(exynos4x12_aliases,
+ samsung_clk_register_alias(ctx, exynos4x12_aliases,
ARRAY_SIZE(exynos4x12_aliases));
}
- samsung_clk_register_alias(exynos4_aliases,
+ samsung_clk_register_alias(ctx, exynos4_aliases,
ARRAY_SIZE(exynos4_aliases));
exynos4_clk_sleep_init();
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index e7ee4420da81..1fad4c5e3f5d 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -24,10 +24,14 @@
#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
+#define PWR_CTRL1 0x1020
+#define PWR_CTRL2 0x1024
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
#define GATE_IP_ACP 0x8800
+#define GATE_IP_ISP0 0xc800
+#define GATE_IP_ISP1 0xc804
#define CPLL_LOCK 0x10020
#define EPLL_LOCK 0x10030
#define VPLL_LOCK 0x10040
@@ -37,6 +41,7 @@
#define VPLL_CON0 0x10140
#define GPLL_CON0 0x10150
#define SRC_TOP0 0x10210
+#define SRC_TOP1 0x10214
#define SRC_TOP2 0x10218
#define SRC_TOP3 0x1021c
#define SRC_GSCL 0x10220
@@ -71,6 +76,7 @@
#define GATE_IP_GSCL 0x10920
#define GATE_IP_DISP1 0x10928
#define GATE_IP_MFC 0x1092c
+#define GATE_IP_G3D 0x10930
#define GATE_IP_GEN 0x10934
#define GATE_IP_FSYS 0x10944
#define GATE_IP_PERIC 0x10950
@@ -80,6 +86,23 @@
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
+/* Below definitions are used for PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN (1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN (1 << 8)
+#define PWR_CTRL1_USE_CORE1_WFE (1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE (1 << 4)
+#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
+
+#define PWR_CTRL2_DIV2_UP_EN (1 << 25)
+#define PWR_CTRL2_DIV1_UP_EN (1 << 24)
+#define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16)
+#define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8)
+#define PWR_CTRL2_CORE2_UP_RATIO (1 << 4)
+#define PWR_CTRL2_CORE1_UP_RATIO (1 << 0)
+
/* list of PLLs to be registered */
enum exynos5250_plls {
apll, mpll, cpll, epll, vpll, gpll, bpll,
@@ -98,8 +121,11 @@ static struct samsung_clk_reg_dump *exynos5250_save;
static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
+ PWR_CTRL1,
+ PWR_CTRL2,
SRC_CORE1,
SRC_TOP0,
+ SRC_TOP1,
SRC_TOP2,
SRC_TOP3,
SRC_GSCL,
@@ -133,6 +159,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
DIV_PERIC5,
GATE_IP_GSCL,
GATE_IP_MFC,
+ GATE_IP_G3D,
GATE_IP_GEN,
GATE_IP_FSYS,
GATE_IP_PERIC,
@@ -141,6 +168,8 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
PLL_DIV2_SEL,
GATE_IP_DISP1,
GATE_IP_ACP,
+ GATE_IP_ISP0,
+ GATE_IP_ISP1,
};
static int exynos5250_clk_suspend(void)
@@ -189,13 +218,16 @@ PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi27m" };
PNAME(mout_vpll_p) = { "mout_vpllsrc", "fout_vpll" };
PNAME(mout_cpll_p) = { "fin_pll", "fout_cpll" };
PNAME(mout_epll_p) = { "fin_pll", "fout_epll" };
+PNAME(mout_gpll_p) = { "fin_pll", "fout_gpll" };
PNAME(mout_mpll_user_p) = { "fin_pll", "mout_mpll" };
PNAME(mout_bpll_user_p) = { "fin_pll", "mout_bpll" };
PNAME(mout_aclk166_p) = { "mout_cpll", "mout_mpll_user" };
PNAME(mout_aclk200_p) = { "mout_mpll_user", "mout_bpll_user" };
+PNAME(mout_aclk400_p) = { "mout_aclk400_g3d_mid", "mout_gpll" };
PNAME(mout_aclk200_sub_p) = { "fin_pll", "div_aclk200" };
PNAME(mout_aclk266_sub_p) = { "fin_pll", "div_aclk266" };
PNAME(mout_aclk333_sub_p) = { "fin_pll", "div_aclk333" };
+PNAME(mout_aclk400_isp_sub_p) = { "fin_pll", "div_aclk400_isp" };
PNAME(mout_hdmi_p) = { "div_hdmi_pixel", "sclk_hdmiphy" };
PNAME(mout_usb3_p) = { "mout_mpll_user", "mout_cpll" };
PNAME(mout_group1_p) = { "fin_pll", "fin_pll", "sclk_hdmi27m",
@@ -273,15 +305,23 @@ static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(0, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
MUX(0, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
MUX(0, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
+ MUX(0, "mout_aclk400_g3d_mid", mout_aclk200_p, SRC_TOP0, 20, 1),
+
+ MUX(0, "mout_aclk400_isp", mout_aclk200_p, SRC_TOP1, 24, 1),
+ MUX(0, "mout_aclk400_g3d", mout_aclk400_p, SRC_TOP1, 28, 1),
MUX(0, "mout_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
MUX(0, "mout_epll", mout_epll_p, SRC_TOP2, 12, 1),
MUX(0, "mout_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
MUX(0, "mout_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
MUX(0, "mout_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
+ MUX(CLK_MOUT_GPLL, "mout_gpll", mout_gpll_p, SRC_TOP2, 28, 1),
MUX(0, "mout_aclk200_disp1_sub", mout_aclk200_sub_p, SRC_TOP3, 4, 1),
MUX(0, "mout_aclk266_gscl_sub", mout_aclk266_sub_p, SRC_TOP3, 8, 1),
+ MUX(0, "mout_aclk_266_isp_sub", mout_aclk266_sub_p, SRC_TOP3, 16, 1),
+ MUX(0, "mout_aclk_400_isp_sub", mout_aclk400_isp_sub_p,
+ SRC_TOP3, 20, 1),
MUX(0, "mout_aclk333_sub", mout_aclk333_sub_p, SRC_TOP3, 24, 1),
MUX(0, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
@@ -351,7 +391,10 @@ static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV(0, "div_aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
DIV(0, "div_aclk266", "mout_mpll_user", DIV_TOP0, 16, 3),
DIV(0, "div_aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
+ DIV(0, "div_aclk400_g3d", "mout_aclk400_g3d", DIV_TOP0,
+ 24, 3),
+ DIV(0, "div_aclk400_isp", "mout_aclk400_isp", DIV_TOP1, 20, 3),
DIV(0, "div_aclk66_pre", "mout_mpll_user", DIV_TOP1, 24, 3),
DIV(0, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
@@ -428,6 +471,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
* CMU_ACP
*/
GATE(CLK_MDMA0, "mdma0", "div_aclk266", GATE_IP_ACP, 1, 0, 0),
+ GATE(CLK_SSS, "sss", "div_aclk266", GATE_IP_ACP, 2, 0, 0),
GATE(CLK_G2D, "g2d", "div_aclk200", GATE_IP_ACP, 3, 0, 0),
GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "div_aclk266", GATE_IP_ACP, 5, 0, 0),
@@ -533,7 +577,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
0),
GATE(CLK_SMMU_MFCL, "smmu_mfcl", "mout_aclk333_sub", GATE_IP_MFC, 2, 0,
0),
-
+ GATE(CLK_G3D, "g3d", "div_aclk400_g3d", GATE_IP_G3D, 0,
+ CLK_SET_RATE_PARENT, 0),
GATE(CLK_ROTATOR, "rotator", "div_aclk266", GATE_IP_GEN, 1, 0, 0),
GATE(CLK_JPEG, "jpeg", "div_aclk166", GATE_IP_GEN, 2, 0, 0),
GATE(CLK_MDMA1, "mdma1", "div_aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -615,6 +660,31 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0),
GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
+ GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 2, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 8, 0, 0),
+ GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
+ GATE(CLK_SMMU_FIMC_ISP, "smmu_fimc_isp", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP0, 8, 0, 0),
+ GATE(CLK_SMMU_FIMC_DRC, "smmu_fimc_drc", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP0, 9, 0, 0),
+ GATE(CLK_SMMU_FIMC_FD, "smmu_fimc_fd", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP0, 10, 0, 0),
+ GATE(CLK_SMMU_FIMC_SCC, "smmu_fimc_scc", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP0, 11, 0, 0),
+ GATE(CLK_SMMU_FIMC_SCP, "smmu_fimc_scp", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP0, 12, 0, 0),
+ GATE(CLK_SMMU_FIMC_MCU, "smmu_fimc_mcu", "mout_aclk_400_isp_sub",
+ GATE_IP_ISP0, 13, 0, 0),
+ GATE(CLK_SMMU_FIMC_ODC, "smmu_fimc_odc", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP1, 4, 0, 0),
+ GATE(CLK_SMMU_FIMC_DIS0, "smmu_fimc_dis0", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP1, 5, 0, 0),
+ GATE(CLK_SMMU_FIMC_DIS1, "smmu_fimc_dis1", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP1, 6, 0, 0),
+ GATE(CLK_SMMU_FIMC_3DNR, "smmu_fimc_3dnr", "mout_aclk_266_isp_sub",
+ GATE_IP_ISP1, 7, 0, 0),
};
static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
@@ -686,6 +756,9 @@ static struct of_device_id ext_clk_match[] __initdata = {
 /* register exynos5250 clocks */
static void __init exynos5250_clk_init(struct device_node *np)
{
+ struct samsung_clk_provider *ctx;
+ unsigned int tmp;
+
if (np) {
reg_base = of_iomap(np, 0);
if (!reg_base)
@@ -694,11 +767,13 @@ static void __init exynos5250_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+ samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
ext_clk_match);
- samsung_clk_register_mux(exynos5250_pll_pmux_clks,
+ samsung_clk_register_mux(ctx, exynos5250_pll_pmux_clks,
ARRAY_SIZE(exynos5250_pll_pmux_clks));
if (_get_rate("fin_pll") == 24 * MHZ) {
@@ -709,19 +784,40 @@ static void __init exynos5250_clk_init(struct device_node *np)
if (_get_rate("mout_vpllsrc") == 24 * MHZ)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
- samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls),
- reg_base);
- samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks,
+ samsung_clk_register_pll(ctx, exynos5250_plls,
+ ARRAY_SIZE(exynos5250_plls),
+ reg_base);
+ samsung_clk_register_fixed_rate(ctx, exynos5250_fixed_rate_clks,
ARRAY_SIZE(exynos5250_fixed_rate_clks));
- samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks,
+ samsung_clk_register_fixed_factor(ctx, exynos5250_fixed_factor_clks,
ARRAY_SIZE(exynos5250_fixed_factor_clks));
- samsung_clk_register_mux(exynos5250_mux_clks,
+ samsung_clk_register_mux(ctx, exynos5250_mux_clks,
ARRAY_SIZE(exynos5250_mux_clks));
- samsung_clk_register_div(exynos5250_div_clks,
+ samsung_clk_register_div(ctx, exynos5250_div_clks,
ARRAY_SIZE(exynos5250_div_clks));
- samsung_clk_register_gate(exynos5250_gate_clks,
+ samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
+ /*
+ * Enable arm clock down (in idle) and set arm divider
+ * ratios in WFI/WFE state.
+ */
+ tmp = (PWR_CTRL1_CORE2_DOWN_RATIO | PWR_CTRL1_CORE1_DOWN_RATIO |
+ PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+ PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+ PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+ __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+ /*
+ * Enable arm clock up (on exiting idle). Set arm divider
+ * ratios when not in idle along with the standby duration
+ * ratios.
+ */
+ tmp = (PWR_CTRL2_DIV2_UP_EN | PWR_CTRL2_DIV1_UP_EN |
+ PWR_CTRL2_DUR_STANDBY2_VAL | PWR_CTRL2_DUR_STANDBY1_VAL |
+ PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO);
+ __raw_writel(tmp, reg_base + PWR_CTRL2);
+
exynos5250_clk_sleep_init();
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
new file mode 100644
index 000000000000..64596ba58df1
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -0,0 +1,1980 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Rahul Sharma <rahul.sharma@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5260 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "clk-exynos5260.h"
+#include "clk.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/exynos5260-clk.h>
+
+static LIST_HEAD(clock_reg_cache_list);
+
+struct exynos5260_clock_reg_cache {
+ struct list_head node;
+ void __iomem *reg_base;
+ struct samsung_clk_reg_dump *rdump;
+ unsigned int rd_num;
+};
+
+struct exynos5260_cmu_info {
+ /* list of pll clocks and respective count */
+ struct samsung_pll_clock *pll_clks;
+ unsigned int nr_pll_clks;
+ /* list of mux clocks and respective count */
+ struct samsung_mux_clock *mux_clks;
+ unsigned int nr_mux_clks;
+ /* list of div clocks and respective count */
+ struct samsung_div_clock *div_clks;
+ unsigned int nr_div_clks;
+ /* list of gate clocks and respective count */
+ struct samsung_gate_clock *gate_clks;
+ unsigned int nr_gate_clks;
+ /* list of fixed clocks and respective count */
+ struct samsung_fixed_rate_clock *fixed_clks;
+ unsigned int nr_fixed_clks;
+	/* total number of clocks with IDs assigned */
+ unsigned int nr_clk_ids;
+
+	/* list and number of clock registers */
+ unsigned long *clk_regs;
+ unsigned int nr_clk_regs;
+};
+
+/*
+ * Applicable to all 2550-type PLLs of Exynos5260, listed below:
+ * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
+ */
+static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = {
+ PLL_35XX_RATE(1700000000, 425, 6, 0),
+ PLL_35XX_RATE(1600000000, 200, 3, 0),
+ PLL_35XX_RATE(1500000000, 250, 4, 0),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 6, 0),
+ PLL_35XX_RATE(1200000000, 400, 4, 1),
+ PLL_35XX_RATE(1100000000, 275, 3, 1),
+ PLL_35XX_RATE(1000000000, 250, 3, 1),
+ PLL_35XX_RATE(933000000, 311, 4, 1),
+ PLL_35XX_RATE(900000000, 300, 4, 1),
+ PLL_35XX_RATE(800000000, 200, 3, 1),
+ PLL_35XX_RATE(733000000, 733, 12, 1),
+ PLL_35XX_RATE(700000000, 175, 3, 1),
+ PLL_35XX_RATE(667000000, 667, 12, 1),
+ PLL_35XX_RATE(633000000, 211, 4, 1),
+ PLL_35XX_RATE(620000000, 310, 3, 2),
+ PLL_35XX_RATE(600000000, 400, 4, 2),
+ PLL_35XX_RATE(543000000, 362, 4, 2),
+ PLL_35XX_RATE(533000000, 533, 6, 2),
+ PLL_35XX_RATE(500000000, 250, 3, 2),
+ PLL_35XX_RATE(450000000, 300, 4, 2),
+ PLL_35XX_RATE(400000000, 200, 3, 2),
+ PLL_35XX_RATE(350000000, 175, 3, 2),
+ PLL_35XX_RATE(300000000, 400, 4, 3),
+ PLL_35XX_RATE(266000000, 266, 3, 3),
+ PLL_35XX_RATE(200000000, 200, 3, 3),
+ PLL_35XX_RATE(160000000, 160, 3, 3),
+};
+
+/*
+ * Applicable for 2650 Type PLL for AUD_PLL.
+ */
+static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
+ PLL_36XX_RATE(1600000000, 200, 3, 0, 0),
+ PLL_36XX_RATE(1200000000, 100, 2, 0, 0),
+ PLL_36XX_RATE(1000000000, 250, 3, 1, 0),
+ PLL_36XX_RATE(800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(532000000, 266, 3, 2, 0),
+ PLL_36XX_RATE(480000000, 160, 2, 2, 0),
+ PLL_36XX_RATE(432000000, 144, 2, 2, 0),
+ PLL_36XX_RATE(400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
+ PLL_36XX_RATE(333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+ PLL_36XX_RATE(200000000, 200, 3, 3, 0),
+ PLL_36XX_RATE(166000000, 166, 3, 3, 0),
+ PLL_36XX_RATE(133000000, 266, 3, 4, 0),
+ PLL_36XX_RATE(100000000, 200, 3, 4, 0),
+ PLL_36XX_RATE(66000000, 176, 2, 5, 0),
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+static int exynos5260_clk_suspend(void)
+{
+ struct exynos5260_clock_reg_cache *cache;
+
+ list_for_each_entry(cache, &clock_reg_cache_list, node)
+ samsung_clk_save(cache->reg_base, cache->rdump,
+ cache->rd_num);
+
+ return 0;
+}
+
+static void exynos5260_clk_resume(void)
+{
+ struct exynos5260_clock_reg_cache *cache;
+
+ list_for_each_entry(cache, &clock_reg_cache_list, node)
+ samsung_clk_restore(cache->reg_base, cache->rdump,
+ cache->rd_num);
+}
+
+static struct syscore_ops exynos5260_clk_syscore_ops = {
+ .suspend = exynos5260_clk_suspend,
+ .resume = exynos5260_clk_resume,
+};
+
+static void exynos5260_clk_sleep_init(void __iomem *reg_base,
+ unsigned long *rdump,
+ unsigned long nr_rdump)
+{
+ struct exynos5260_clock_reg_cache *reg_cache;
+
+ reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache),
+ GFP_KERNEL);
+ if (!reg_cache)
+ panic("could not allocate register cache.\n");
+
+ reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
+
+ if (!reg_cache->rdump)
+ panic("could not allocate register dump storage.\n");
+
+ if (list_empty(&clock_reg_cache_list))
+ register_syscore_ops(&exynos5260_clk_syscore_ops);
+
+ reg_cache->rd_num = nr_rdump;
+ reg_cache->reg_base = reg_base;
+ list_add_tail(&reg_cache->node, &clock_reg_cache_list);
+}
+
+#else
+static void exynos5260_clk_sleep_init(void __iomem *reg_base,
+ unsigned long *rdump,
+ unsigned long nr_rdump){}
+#endif
+
+/*
+ * Common function which registers plls, muxes, dividers and gates
+ * for each CMU. It also add CMU register list to register cache.
+ */
+
+void __init exynos5260_cmu_register_one(struct device_node *np,
+ struct exynos5260_cmu_info *cmu)
+{
+ void __iomem *reg_base;
+ struct samsung_clk_provider *ctx;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
+ if (!ctx)
+ panic("%s: unable to alllocate ctx\n", __func__);
+
+ if (cmu->pll_clks)
+ samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
+ reg_base);
+ if (cmu->mux_clks)
+ samsung_clk_register_mux(ctx, cmu->mux_clks,
+ cmu->nr_mux_clks);
+ if (cmu->div_clks)
+ samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
+ if (cmu->gate_clks)
+ samsung_clk_register_gate(ctx, cmu->gate_clks,
+ cmu->nr_gate_clks);
+ if (cmu->fixed_clks)
+ samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
+ cmu->nr_fixed_clks);
+ if (cmu->clk_regs)
+ exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
+ cmu->nr_clk_regs);
+}
+
+
+/* CMU_AUD */
+
+static unsigned long aud_clk_regs[] __initdata = {
+ MUX_SEL_AUD,
+ DIV_AUD0,
+ DIV_AUD1,
+ EN_ACLK_AUD,
+ EN_PCLK_AUD,
+ EN_SCLK_AUD,
+ EN_IP_AUD,
+};
+
+PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"};
+PNAME(mout_sclk_aud_i2s_p) = {"mout_aud_pll_user", "ioclk_i2s_cdclk"};
+PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"};
+
+struct samsung_mux_clock aud_mux_clks[] __initdata = {
+ MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p,
+ MUX_SEL_AUD, 0, 1),
+ MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p,
+ MUX_SEL_AUD, 4, 1),
+ MUX(AUD_MOUT_SCLK_AUD_PCM, "mout_sclk_aud_pcm", mout_sclk_aud_pcm_p,
+ MUX_SEL_AUD, 8, 1),
+};
+
+struct samsung_div_clock aud_div_clks[] __initdata = {
+ DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user",
+ DIV_AUD0, 0, 4),
+
+ DIV(AUD_DOUT_SCLK_AUD_I2S, "dout_sclk_aud_i2s", "mout_sclk_aud_i2s",
+ DIV_AUD1, 0, 4),
+ DIV(AUD_DOUT_SCLK_AUD_PCM, "dout_sclk_aud_pcm", "mout_sclk_aud_pcm",
+ DIV_AUD1, 4, 8),
+ DIV(AUD_DOUT_SCLK_AUD_UART, "dout_sclk_aud_uart", "mout_aud_pll_user",
+ DIV_AUD1, 12, 4),
+};
+
+struct samsung_gate_clock aud_gate_clks[] __initdata = {
+ GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s",
+ EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm",
+ EN_SCLK_AUD, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(AUD_SCLK_AUD_UART, "sclk_aud_uart", "dout_sclk_aud_uart",
+ EN_SCLK_AUD, 2, CLK_SET_RATE_PARENT, 0),
+
+ GATE(AUD_CLK_SRAMC, "clk_sramc", "dout_aclk_aud_131", EN_IP_AUD,
+ 0, 0, 0),
+ GATE(AUD_CLK_DMAC, "clk_dmac", "dout_aclk_aud_131",
+ EN_IP_AUD, 1, 0, 0),
+ GATE(AUD_CLK_I2S, "clk_i2s", "dout_aclk_aud_131", EN_IP_AUD, 2, 0, 0),
+ GATE(AUD_CLK_PCM, "clk_pcm", "dout_aclk_aud_131", EN_IP_AUD, 3, 0, 0),
+ GATE(AUD_CLK_AUD_UART, "clk_aud_uart", "dout_aclk_aud_131",
+ EN_IP_AUD, 4, 0, 0),
+};
+
+static void __init exynos5260_clk_aud_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = aud_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
+ cmu.div_clks = aud_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(aud_div_clks);
+ cmu.gate_clks = aud_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(aud_gate_clks);
+ cmu.nr_clk_ids = AUD_NR_CLK;
+ cmu.clk_regs = aud_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
+ exynos5260_clk_aud_init);
+
+
+/* CMU_DISP */
+
+static unsigned long disp_clk_regs[] __initdata = {
+ MUX_SEL_DISP0,
+ MUX_SEL_DISP1,
+ MUX_SEL_DISP2,
+ MUX_SEL_DISP3,
+ MUX_SEL_DISP4,
+ DIV_DISP,
+ EN_ACLK_DISP,
+ EN_PCLK_DISP,
+ EN_SCLK_DISP0,
+ EN_SCLK_DISP1,
+ EN_IP_DISP,
+ EN_IP_DISP_BUS,
+};
+
+PNAME(mout_phyclk_dptx_phy_ch3_txd_clk_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_ch3_txd_clk"};
+PNAME(mout_phyclk_dptx_phy_ch2_txd_clk_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_ch2_txd_clk"};
+PNAME(mout_phyclk_dptx_phy_ch1_txd_clk_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_ch1_txd_clk"};
+PNAME(mout_phyclk_dptx_phy_ch0_txd_clk_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_ch0_txd_clk"};
+PNAME(mout_aclk_disp_222_user_p) = {"fin_pll", "dout_aclk_disp_222"};
+PNAME(mout_sclk_disp_pixel_user_p) = {"fin_pll", "dout_sclk_disp_pixel"};
+PNAME(mout_aclk_disp_333_user_p) = {"fin_pll", "dout_aclk_disp_333"};
+PNAME(mout_phyclk_hdmi_phy_tmds_clko_user_p) = {"fin_pll",
+ "phyclk_hdmi_phy_tmds_clko"};
+PNAME(mout_phyclk_hdmi_phy_ref_clko_user_p) = {"fin_pll",
+ "phyclk_hdmi_phy_ref_clko"};
+PNAME(mout_phyclk_hdmi_phy_pixel_clko_user_p) = {"fin_pll",
+ "phyclk_hdmi_phy_pixel_clko"};
+PNAME(mout_phyclk_hdmi_link_o_tmds_clkhi_user_p) = {"fin_pll",
+ "phyclk_hdmi_link_o_tmds_clkhi"};
+PNAME(mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs_p) = {"fin_pll",
+ "phyclk_mipi_dphy_4l_m_txbyte_clkhs"};
+PNAME(mout_phyclk_dptx_phy_o_ref_clk_24m_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_o_ref_clk_24m"};
+PNAME(mout_phyclk_dptx_phy_clk_div2_user_p) = {"fin_pll",
+ "phyclk_dptx_phy_clk_div2"};
+PNAME(mout_sclk_hdmi_pixel_p) = {"mout_sclk_disp_pixel_user",
+ "mout_aclk_disp_222_user"};
+PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll",
+ "phyclk_mipi_dphy_4l_m_rxclkesc0"};
+PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk",
+ "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
+
+struct samsung_mux_clock disp_mux_clks[] __initdata = {
+ MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user",
+ mout_aclk_disp_333_user_p,
+ MUX_SEL_DISP0, 0, 1),
+ MUX(DISP_MOUT_SCLK_DISP_PIXEL_USER, "mout_sclk_disp_pixel_user",
+ mout_sclk_disp_pixel_user_p,
+ MUX_SEL_DISP0, 4, 1),
+ MUX(DISP_MOUT_ACLK_DISP_222_USER, "mout_aclk_disp_222_user",
+ mout_aclk_disp_222_user_p,
+ MUX_SEL_DISP0, 8, 1),
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH0_TXD_CLK_USER,
+ "mout_phyclk_dptx_phy_ch0_txd_clk_user",
+ mout_phyclk_dptx_phy_ch0_txd_clk_user_p,
+ MUX_SEL_DISP0, 16, 1),
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH1_TXD_CLK_USER,
+ "mout_phyclk_dptx_phy_ch1_txd_clk_user",
+ mout_phyclk_dptx_phy_ch1_txd_clk_user_p,
+ MUX_SEL_DISP0, 20, 1),
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH2_TXD_CLK_USER,
+ "mout_phyclk_dptx_phy_ch2_txd_clk_user",
+ mout_phyclk_dptx_phy_ch2_txd_clk_user_p,
+ MUX_SEL_DISP0, 24, 1),
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH3_TXD_CLK_USER,
+ "mout_phyclk_dptx_phy_ch3_txd_clk_user",
+ mout_phyclk_dptx_phy_ch3_txd_clk_user_p,
+ MUX_SEL_DISP0, 28, 1),
+
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CLK_DIV2_USER,
+ "mout_phyclk_dptx_phy_clk_div2_user",
+ mout_phyclk_dptx_phy_clk_div2_user_p,
+ MUX_SEL_DISP1, 0, 1),
+ MUX(DISP_MOUT_PHYCLK_DPTX_PHY_O_REF_CLK_24M_USER,
+ "mout_phyclk_dptx_phy_o_ref_clk_24m_user",
+ mout_phyclk_dptx_phy_o_ref_clk_24m_user_p,
+ MUX_SEL_DISP1, 4, 1),
+ MUX(DISP_MOUT_PHYCLK_MIPI_DPHY_4L_M_TXBYTE_CLKHS,
+ "mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs",
+ mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs_p,
+ MUX_SEL_DISP1, 8, 1),
+ MUX(DISP_MOUT_PHYCLK_HDMI_LINK_O_TMDS_CLKHI_USER,
+ "mout_phyclk_hdmi_link_o_tmds_clkhi_user",
+ mout_phyclk_hdmi_link_o_tmds_clkhi_user_p,
+ MUX_SEL_DISP1, 16, 1),
+ MUX(DISP_MOUT_HDMI_PHY_PIXEL,
+ "mout_phyclk_hdmi_phy_pixel_clko_user",
+ mout_phyclk_hdmi_phy_pixel_clko_user_p,
+ MUX_SEL_DISP1, 20, 1),
+ MUX(DISP_MOUT_PHYCLK_HDMI_PHY_REF_CLKO_USER,
+ "mout_phyclk_hdmi_phy_ref_clko_user",
+ mout_phyclk_hdmi_phy_ref_clko_user_p,
+ MUX_SEL_DISP1, 24, 1),
+ MUX(DISP_MOUT_PHYCLK_HDMI_PHY_TMDS_CLKO_USER,
+ "mout_phyclk_hdmi_phy_tmds_clko_user",
+ mout_phyclk_hdmi_phy_tmds_clko_user_p,
+ MUX_SEL_DISP1, 28, 1),
+
+ MUX(DISP_MOUT_PHYCLK_MIPI_DPHY_4LMRXCLK_ESC0_USER,
+ "mout_phyclk_mipi_dphy_4lmrxclk_esc0_user",
+ mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p,
+ MUX_SEL_DISP2, 0, 1),
+ MUX(DISP_MOUT_SCLK_HDMI_PIXEL, "mout_sclk_hdmi_pixel",
+ mout_sclk_hdmi_pixel_p,
+ MUX_SEL_DISP2, 4, 1),
+
+ MUX(DISP_MOUT_SCLK_HDMI_SPDIF, "mout_sclk_hdmi_spdif",
+ mout_sclk_hdmi_spdif_p,
+ MUX_SEL_DISP4, 4, 2),
+};
+
+struct samsung_div_clock disp_div_clks[] __initdata = {
+ DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111",
+ "mout_aclk_disp_222_user",
+ DIV_DISP, 8, 4),
+ DIV(DISP_DOUT_SCLK_FIMD1_EXTCLKPLL, "dout_sclk_fimd1_extclkpll",
+ "mout_sclk_disp_pixel_user",
+ DIV_DISP, 12, 4),
+ DIV(DISP_DOUT_SCLK_HDMI_PHY_PIXEL_CLKI,
+ "dout_sclk_hdmi_phy_pixel_clki",
+ "mout_sclk_hdmi_pixel",
+ DIV_DISP, 16, 4),
+};
+
+struct samsung_gate_clock disp_gate_clks[] __initdata = {
+ GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel",
+ "mout_phyclk_hdmi_phy_pixel_clko_user",
+ EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0),
+ GATE(DISP_SCLK_PIXEL, "sclk_hdmi_phy_pixel_clki",
+ "dout_sclk_hdmi_phy_pixel_clki",
+ EN_SCLK_DISP0, 29, CLK_SET_RATE_PARENT, 0),
+
+ GATE(DISP_CLK_DP, "clk_dptx_link", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 4, 0, 0),
+ GATE(DISP_CLK_DPPHY, "clk_dptx_phy", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 5, 0, 0),
+ GATE(DISP_CLK_DSIM1, "clk_dsim1", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 6, 0, 0),
+ GATE(DISP_CLK_FIMD1, "clk_fimd1", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 7, 0, 0),
+ GATE(DISP_CLK_HDMI, "clk_hdmi", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 8, 0, 0),
+ GATE(DISP_CLK_HDMIPHY, "clk_hdmiphy", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 9, 0, 0),
+ GATE(DISP_CLK_MIPIPHY, "clk_mipi_dphy", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 10, 0, 0),
+ GATE(DISP_CLK_MIXER, "clk_mixer", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 11, 0, 0),
+ GATE(DISP_CLK_PIXEL_DISP, "clk_pixel_disp", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(DISP_CLK_PIXEL_MIXER, "clk_pixel_mixer", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(DISP_CLK_SMMU_FIMD1M0, "clk_smmu3_fimd1m0",
+ "mout_aclk_disp_222_user",
+ EN_IP_DISP, 22, 0, 0),
+ GATE(DISP_CLK_SMMU_FIMD1M1, "clk_smmu3_fimd1m1",
+ "mout_aclk_disp_222_user",
+ EN_IP_DISP, 23, 0, 0),
+ GATE(DISP_CLK_SMMU_TV, "clk_smmu3_tv", "mout_aclk_disp_222_user",
+ EN_IP_DISP, 25, 0, 0),
+};
+
+static void __init exynos5260_clk_disp_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = disp_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
+ cmu.div_clks = disp_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(disp_div_clks);
+ cmu.gate_clks = disp_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(disp_gate_clks);
+ cmu.nr_clk_ids = DISP_NR_CLK;
+ cmu.clk_regs = disp_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
+ exynos5260_clk_disp_init);
+
+
+/* CMU_EGL */
+
+static unsigned long egl_clk_regs[] __initdata = {
+ EGL_PLL_LOCK,
+ EGL_PLL_CON0,
+ EGL_PLL_CON1,
+ EGL_PLL_FREQ_DET,
+ MUX_SEL_EGL,
+ MUX_ENABLE_EGL,
+ DIV_EGL,
+ DIV_EGL_PLL_FDET,
+ EN_ACLK_EGL,
+ EN_PCLK_EGL,
+ EN_SCLK_EGL,
+};
+
+PNAME(mout_egl_b_p) = {"mout_egl_pll", "dout_bus_pll"};
+PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"};
+
+struct samsung_mux_clock egl_mux_clks[] __initdata = {
+ MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p,
+ MUX_SEL_EGL, 4, 1),
+ MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1),
+};
+
+struct samsung_div_clock egl_div_clks[] __initdata = {
+ DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3),
+ DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3),
+ DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3),
+ DIV(EGL_DOUT_PCLK_EGL, "dout_pclk_egl", "dout_egl_atclk",
+ DIV_EGL, 12, 3),
+ DIV(EGL_DOUT_EGL_ATCLK, "dout_egl_atclk", "dout_egl2", DIV_EGL, 16, 3),
+ DIV(EGL_DOUT_EGL_PCLK_DBG, "dout_egl_pclk_dbg", "dout_egl_atclk",
+ DIV_EGL, 20, 3),
+ DIV(EGL_DOUT_EGL_PLL, "dout_egl_pll", "mout_egl_b", DIV_EGL, 24, 3),
+};
+
+static struct samsung_pll_clock egl_pll_clks[] __initdata = {
+ PLL(pll_2550xx, EGL_FOUT_EGL_PLL, "fout_egl_pll", "fin_pll",
+ EGL_PLL_LOCK, EGL_PLL_CON0,
+ pll2550_24mhz_tbl),
+};
+
+static void __init exynos5260_clk_egl_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.pll_clks = egl_pll_clks;
+ cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks);
+ cmu.mux_clks = egl_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(egl_mux_clks);
+ cmu.div_clks = egl_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(egl_div_clks);
+ cmu.nr_clk_ids = EGL_NR_CLK;
+ cmu.clk_regs = egl_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
+ exynos5260_clk_egl_init);
+
+
+/* CMU_FSYS */
+
+static unsigned long fsys_clk_regs[] __initdata = {
+ MUX_SEL_FSYS0,
+ MUX_SEL_FSYS1,
+ EN_ACLK_FSYS,
+ EN_ACLK_FSYS_SECURE_RTIC,
+ EN_ACLK_FSYS_SECURE_SMMU_RTIC,
+ EN_SCLK_FSYS,
+ EN_IP_FSYS,
+ EN_IP_FSYS_SECURE_RTIC,
+ EN_IP_FSYS_SECURE_SMMU_RTIC,
+};
+
+PNAME(mout_phyclk_usbhost20_phyclk_user_p) = {"fin_pll",
+ "phyclk_usbhost20_phy_phyclock"};
+PNAME(mout_phyclk_usbhost20_freeclk_user_p) = {"fin_pll",
+ "phyclk_usbhost20_phy_freeclk"};
+PNAME(mout_phyclk_usbhost20_clk48mohci_user_p) = {"fin_pll",
+ "phyclk_usbhost20_phy_clk48mohci"};
+PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll",
+ "phyclk_usbdrd30_udrd30_pipe_pclk"};
+PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll",
+ "phyclk_usbdrd30_udrd30_phyclock"};
+
+struct samsung_mux_clock fsys_mux_clks[] __initdata = {
+ MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER,
+ "mout_phyclk_usbdrd30_phyclock_user",
+ mout_phyclk_usbdrd30_phyclock_user_p,
+ MUX_SEL_FSYS1, 0, 1),
+ MUX(FSYS_MOUT_PHYCLK_USBDRD30_PIPE_PCLK_USER,
+ "mout_phyclk_usbdrd30_pipe_pclk_user",
+ mout_phyclk_usbdrd30_pipe_pclk_user_p,
+ MUX_SEL_FSYS1, 4, 1),
+ MUX(FSYS_MOUT_PHYCLK_USBHOST20_CLK48MOHCI_USER,
+ "mout_phyclk_usbhost20_clk48mohci_user",
+ mout_phyclk_usbhost20_clk48mohci_user_p,
+ MUX_SEL_FSYS1, 8, 1),
+ MUX(FSYS_MOUT_PHYCLK_USBHOST20_FREECLK_USER,
+ "mout_phyclk_usbhost20_freeclk_user",
+ mout_phyclk_usbhost20_freeclk_user_p,
+ MUX_SEL_FSYS1, 12, 1),
+ MUX(FSYS_MOUT_PHYCLK_USBHOST20_PHYCLK_USER,
+ "mout_phyclk_usbhost20_phyclk_user",
+ mout_phyclk_usbhost20_phyclk_user_p,
+ MUX_SEL_FSYS1, 16, 1),
+};
+
+struct samsung_gate_clock fsys_gate_clks[] __initdata = {
+ GATE(FSYS_PHYCLK_USBHOST20, "phyclk_usbhost20_phyclock",
+ "mout_phyclk_usbdrd30_phyclock_user",
+ EN_SCLK_FSYS, 1, 0, 0),
+ GATE(FSYS_PHYCLK_USBDRD30, "phyclk_usbdrd30_udrd30_phyclock_g",
+ "mout_phyclk_usbdrd30_phyclock_user",
+ EN_SCLK_FSYS, 7, 0, 0),
+
+ GATE(FSYS_CLK_MMC0, "clk_mmc0", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 6, 0, 0),
+ GATE(FSYS_CLK_MMC1, "clk_mmc1", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 7, 0, 0),
+ GATE(FSYS_CLK_MMC2, "clk_mmc2", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 8, 0, 0),
+ GATE(FSYS_CLK_PDMA, "clk_pdma", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 9, 0, 0),
+ GATE(FSYS_CLK_SROMC, "clk_sromc", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 13, 0, 0),
+ GATE(FSYS_CLK_USBDRD30, "clk_usbdrd30", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 14, 0, 0),
+ GATE(FSYS_CLK_USBHOST20, "clk_usbhost20", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 15, 0, 0),
+ GATE(FSYS_CLK_USBLINK, "clk_usblink", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 18, 0, 0),
+ GATE(FSYS_CLK_TSI, "clk_tsi", "dout_aclk_fsys_200",
+ EN_IP_FSYS, 20, 0, 0),
+
+ GATE(FSYS_CLK_RTIC, "clk_rtic", "dout_aclk_fsys_200",
+ EN_IP_FSYS_SECURE_RTIC, 11, 0, 0),
+ GATE(FSYS_CLK_SMMU_RTIC, "clk_smmu_rtic", "dout_aclk_fsys_200",
+ EN_IP_FSYS_SECURE_SMMU_RTIC, 12, 0, 0),
+};
+
+static void __init exynos5260_clk_fsys_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = fsys_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
+ cmu.gate_clks = fsys_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(fsys_gate_clks);
+ cmu.nr_clk_ids = FSYS_NR_CLK;
+ cmu.clk_regs = fsys_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
+ exynos5260_clk_fsys_init);
+
+
+/* CMU_G2D */
+
+static unsigned long g2d_clk_regs[] __initdata = {
+ MUX_SEL_G2D,
+ MUX_STAT_G2D,
+ DIV_G2D,
+ EN_ACLK_G2D,
+ EN_ACLK_G2D_SECURE_SSS,
+ EN_ACLK_G2D_SECURE_SLIM_SSS,
+ EN_ACLK_G2D_SECURE_SMMU_SLIM_SSS,
+ EN_ACLK_G2D_SECURE_SMMU_SSS,
+ EN_ACLK_G2D_SECURE_SMMU_MDMA,
+ EN_ACLK_G2D_SECURE_SMMU_G2D,
+ EN_PCLK_G2D,
+ EN_PCLK_G2D_SECURE_SMMU_SLIM_SSS,
+ EN_PCLK_G2D_SECURE_SMMU_SSS,
+ EN_PCLK_G2D_SECURE_SMMU_MDMA,
+ EN_PCLK_G2D_SECURE_SMMU_G2D,
+ EN_IP_G2D,
+ EN_IP_G2D_SECURE_SSS,
+ EN_IP_G2D_SECURE_SLIM_SSS,
+ EN_IP_G2D_SECURE_SMMU_SLIM_SSS,
+ EN_IP_G2D_SECURE_SMMU_SSS,
+ EN_IP_G2D_SECURE_SMMU_MDMA,
+ EN_IP_G2D_SECURE_SMMU_G2D,
+};
+
+PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"};
+
+struct samsung_mux_clock g2d_mux_clks[] __initdata = {
+ MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user",
+ mout_aclk_g2d_333_user_p,
+ MUX_SEL_G2D, 0, 1),
+};
+
+struct samsung_div_clock g2d_div_clks[] __initdata = {
+ DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user",
+ DIV_G2D, 0, 3),
+};
+
+struct samsung_gate_clock g2d_gate_clks[] __initdata = {
+ GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user",
+ EN_IP_G2D, 4, 0, 0),
+ GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user",
+ EN_IP_G2D, 5, 0, 0),
+ GATE(G2D_CLK_MDMA, "clk_mdma", "mout_aclk_g2d_333_user",
+ EN_IP_G2D, 6, 0, 0),
+ GATE(G2D_CLK_SMMU3_JPEG, "clk_smmu3_jpeg", "mout_aclk_g2d_333_user",
+ EN_IP_G2D, 16, 0, 0),
+
+ GATE(G2D_CLK_SSS, "clk_sss", "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SSS, 17, 0, 0),
+
+ GATE(G2D_CLK_SLIM_SSS, "clk_slim_sss", "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SLIM_SSS, 11, 0, 0),
+
+ GATE(G2D_CLK_SMMU_SLIM_SSS, "clk_smmu_slim_sss",
+ "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SMMU_SLIM_SSS, 13, 0, 0),
+
+ GATE(G2D_CLK_SMMU_SSS, "clk_smmu_sss", "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SMMU_SSS, 14, 0, 0),
+
+ GATE(G2D_CLK_SMMU_MDMA, "clk_smmu_mdma", "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SMMU_MDMA, 12, 0, 0),
+
+ GATE(G2D_CLK_SMMU3_G2D, "clk_smmu3_g2d", "mout_aclk_g2d_333_user",
+ EN_IP_G2D_SECURE_SMMU_G2D, 15, 0, 0),
+};
+
+static void __init exynos5260_clk_g2d_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = g2d_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
+ cmu.div_clks = g2d_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(g2d_div_clks);
+ cmu.gate_clks = g2d_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(g2d_gate_clks);
+ cmu.nr_clk_ids = G2D_NR_CLK;
+ cmu.clk_regs = g2d_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
+ exynos5260_clk_g2d_init);
+
+
+/* CMU_G3D */
+
+static unsigned long g3d_clk_regs[] __initdata = {
+ G3D_PLL_LOCK,
+ G3D_PLL_CON0,
+ G3D_PLL_CON1,
+ G3D_PLL_FDET,
+ MUX_SEL_G3D,
+ DIV_G3D,
+ DIV_G3D_PLL_FDET,
+ EN_ACLK_G3D,
+ EN_PCLK_G3D,
+ EN_SCLK_G3D,
+ EN_IP_G3D,
+};
+
+PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"};
+
+struct samsung_mux_clock g3d_mux_clks[] __initdata = {
+ MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+ MUX_SEL_G3D, 0, 1),
+};
+
+struct samsung_div_clock g3d_div_clks[] __initdata = {
+ DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3),
+ DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3),
+};
+
+struct samsung_gate_clock g3d_gate_clks[] __initdata = {
+ GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0),
+ GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d",
+ EN_IP_G3D, 3, 0, 0),
+};
+
+static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
+ PLL(pll_2550, G3D_FOUT_G3D_PLL, "fout_g3d_pll", "fin_pll",
+ G3D_PLL_LOCK, G3D_PLL_CON0,
+ pll2550_24mhz_tbl),
+};
+
+static void __init exynos5260_clk_g3d_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.pll_clks = g3d_pll_clks;
+ cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks);
+ cmu.mux_clks = g3d_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(g3d_mux_clks);
+ cmu.div_clks = g3d_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(g3d_div_clks);
+ cmu.gate_clks = g3d_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(g3d_gate_clks);
+ cmu.nr_clk_ids = G3D_NR_CLK;
+ cmu.clk_regs = g3d_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
+ exynos5260_clk_g3d_init);
+
+
+/* CMU_GSCL */
+
+static unsigned long gscl_clk_regs[] __initdata = {
+ MUX_SEL_GSCL,
+ DIV_GSCL,
+ EN_ACLK_GSCL,
+ EN_ACLK_GSCL_FIMC,
+ EN_ACLK_GSCL_SECURE_SMMU_GSCL0,
+ EN_ACLK_GSCL_SECURE_SMMU_GSCL1,
+ EN_ACLK_GSCL_SECURE_SMMU_MSCL0,
+ EN_ACLK_GSCL_SECURE_SMMU_MSCL1,
+ EN_PCLK_GSCL,
+ EN_PCLK_GSCL_FIMC,
+ EN_PCLK_GSCL_SECURE_SMMU_GSCL0,
+ EN_PCLK_GSCL_SECURE_SMMU_GSCL1,
+ EN_PCLK_GSCL_SECURE_SMMU_MSCL0,
+ EN_PCLK_GSCL_SECURE_SMMU_MSCL1,
+ EN_SCLK_GSCL,
+ EN_SCLK_GSCL_FIMC,
+ EN_IP_GSCL,
+ EN_IP_GSCL_FIMC,
+ EN_IP_GSCL_SECURE_SMMU_GSCL0,
+ EN_IP_GSCL_SECURE_SMMU_GSCL1,
+ EN_IP_GSCL_SECURE_SMMU_MSCL0,
+ EN_IP_GSCL_SECURE_SMMU_MSCL1,
+};
+
+PNAME(mout_aclk_gscl_333_user_p) = {"fin_pll", "dout_aclk_gscl_333"};
+PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
+PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
+PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", "mout_aclk_gscl_fimc_user"};
+
+struct samsung_mux_clock gscl_mux_clks[] __initdata = {
+ MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user",
+ mout_aclk_gscl_333_user_p,
+ MUX_SEL_GSCL, 0, 1),
+ MUX(GSCL_MOUT_ACLK_M2M_400_USER, "mout_aclk_m2m_400_user",
+ mout_aclk_m2m_400_user_p,
+ MUX_SEL_GSCL, 4, 1),
+ MUX(GSCL_MOUT_ACLK_GSCL_FIMC_USER, "mout_aclk_gscl_fimc_user",
+ mout_aclk_gscl_fimc_user_p,
+ MUX_SEL_GSCL, 8, 1),
+ MUX(GSCL_MOUT_ACLK_CSIS, "mout_aclk_csis", mout_aclk_csis_p,
+ MUX_SEL_GSCL, 24, 1),
+};
+
+struct samsung_div_clock gscl_div_clks[] __initdata = {
+ DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100",
+ "mout_aclk_m2m_400_user",
+ DIV_GSCL, 0, 3),
+ DIV(GSCL_DOUT_ACLK_CSIS_200, "dout_aclk_csis_200",
+ "mout_aclk_m2m_400_user",
+ DIV_GSCL, 4, 3),
+};
+
+struct samsung_gate_clock gscl_gate_clks[] __initdata = {
+ GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200",
+ EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200",
+ EN_SCLK_GSCL_FIMC, 1, CLK_SET_RATE_PARENT, 0),
+
+ GATE(GSCL_CLK_GSCL0, "clk_gscl0", "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 2, 0, 0),
+ GATE(GSCL_CLK_GSCL1, "clk_gscl1", "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 3, 0, 0),
+ GATE(GSCL_CLK_MSCL0, "clk_mscl0", "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 4, 0, 0),
+ GATE(GSCL_CLK_MSCL1, "clk_mscl1", "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 5, 0, 0),
+ GATE(GSCL_CLK_PIXEL_GSCL0, "clk_pixel_gscl0",
+ "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 8, 0, 0),
+ GATE(GSCL_CLK_PIXEL_GSCL1, "clk_pixel_gscl1",
+ "mout_aclk_gscl_333_user",
+ EN_IP_GSCL, 9, 0, 0),
+
+ GATE(GSCL_CLK_SMMU3_LITE_A, "clk_smmu3_lite_a",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 5, 0, 0),
+ GATE(GSCL_CLK_SMMU3_LITE_B, "clk_smmu3_lite_b",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 6, 0, 0),
+ GATE(GSCL_CLK_SMMU3_LITE_D, "clk_smmu3_lite_d",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 7, 0, 0),
+ GATE(GSCL_CLK_CSIS0, "clk_csis0", "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 8, 0, 0),
+ GATE(GSCL_CLK_CSIS1, "clk_csis1", "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 9, 0, 0),
+ GATE(GSCL_CLK_FIMC_LITE_A, "clk_fimc_lite_a",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 10, 0, 0),
+ GATE(GSCL_CLK_FIMC_LITE_B, "clk_fimc_lite_b",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 11, 0, 0),
+ GATE(GSCL_CLK_FIMC_LITE_D, "clk_fimc_lite_d",
+ "mout_aclk_gscl_fimc_user",
+ EN_IP_GSCL_FIMC, 12, 0, 0),
+
+ GATE(GSCL_CLK_SMMU3_GSCL0, "clk_smmu3_gscl0",
+ "mout_aclk_gscl_333_user",
+ EN_IP_GSCL_SECURE_SMMU_GSCL0, 17, 0, 0),
+ GATE(GSCL_CLK_SMMU3_GSCL1, "clk_smmu3_gscl1", "mout_aclk_gscl_333_user",
+ EN_IP_GSCL_SECURE_SMMU_GSCL1, 18, 0, 0),
+ GATE(GSCL_CLK_SMMU3_MSCL0, "clk_smmu3_mscl0",
+ "mout_aclk_m2m_400_user",
+ EN_IP_GSCL_SECURE_SMMU_MSCL0, 19, 0, 0),
+ GATE(GSCL_CLK_SMMU3_MSCL1, "clk_smmu3_mscl1",
+ "mout_aclk_m2m_400_user",
+ EN_IP_GSCL_SECURE_SMMU_MSCL1, 20, 0, 0),
+};
+
+static void __init exynos5260_clk_gscl_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = gscl_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
+ cmu.div_clks = gscl_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(gscl_div_clks);
+ cmu.gate_clks = gscl_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(gscl_gate_clks);
+ cmu.nr_clk_ids = GSCL_NR_CLK;
+ cmu.clk_regs = gscl_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
+ exynos5260_clk_gscl_init);
+
+
+/* CMU_ISP */
+
+static unsigned long isp_clk_regs[] __initdata = {
+ MUX_SEL_ISP0,
+ MUX_SEL_ISP1,
+ DIV_ISP,
+ EN_ACLK_ISP0,
+ EN_ACLK_ISP1,
+ EN_PCLK_ISP0,
+ EN_PCLK_ISP1,
+ EN_SCLK_ISP,
+ EN_IP_ISP0,
+ EN_IP_ISP1,
+};
+
+PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"};
+PNAME(mout_isp_266_user_p) = {"fin_pll", "dout_aclk_isp1_266"};
+
+struct samsung_mux_clock isp_mux_clks[] __initdata = {
+ MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p,
+ MUX_SEL_ISP0, 0, 1),
+ MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p,
+ MUX_SEL_ISP0, 4, 1),
+};
+
+struct samsung_div_clock isp_div_clks[] __initdata = {
+ DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc",
+ DIV_ISP, 0, 3),
+ DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc",
+ DIV_ISP, 4, 4),
+ DIV(ISP_DOUT_CA5_ATCLKIN, "dout_ca5_atclkin", "mout_kfc",
+ DIV_ISP, 12, 3),
+ DIV(ISP_DOUT_CA5_PCLKDBG, "dout_ca5_pclkdbg", "mout_kfc",
+ DIV_ISP, 16, 4),
+ DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2),
+};
+
+struct samsung_gate_clock isp_gate_clks[] __initdata = {
+ GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266",
+ EN_IP_ISP0, 15, 0, 0),
+
+ GATE(ISP_CLK_CA5, "clk_isp_ca5", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 1, 0, 0),
+ GATE(ISP_CLK_FIMC_DRC, "clk_isp_fimc_drc", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 2, 0, 0),
+ GATE(ISP_CLK_FIMC_FD, "clk_isp_fimc_fd", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 3, 0, 0),
+ GATE(ISP_CLK_FIMC, "clk_isp_fimc", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 4, 0, 0),
+ GATE(ISP_CLK_FIMC_SCALERC, "clk_isp_fimc_scalerc",
+ "mout_aclk_isp1_266",
+ EN_IP_ISP1, 5, 0, 0),
+ GATE(ISP_CLK_FIMC_SCALERP, "clk_isp_fimc_scalerp",
+ "mout_aclk_isp1_266",
+ EN_IP_ISP1, 6, 0, 0),
+ GATE(ISP_CLK_I2C0, "clk_isp_i2c0", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 7, 0, 0),
+ GATE(ISP_CLK_I2C1, "clk_isp_i2c1", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 8, 0, 0),
+ GATE(ISP_CLK_MCUCTL, "clk_isp_mcuctl", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 9, 0, 0),
+ GATE(ISP_CLK_MPWM, "clk_isp_mpwm", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 10, 0, 0),
+ GATE(ISP_CLK_MTCADC, "clk_isp_mtcadc", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 11, 0, 0),
+ GATE(ISP_CLK_PWM, "clk_isp_pwm", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 14, 0, 0),
+ GATE(ISP_CLK_SMMU_DRC, "clk_smmu_drc", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 21, 0, 0),
+ GATE(ISP_CLK_SMMU_FD, "clk_smmu_fd", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 22, 0, 0),
+ GATE(ISP_CLK_SMMU_ISP, "clk_smmu_isp", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 23, 0, 0),
+ GATE(ISP_CLK_SMMU_ISPCX, "clk_smmu_ispcx", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 24, 0, 0),
+ GATE(ISP_CLK_SMMU_SCALERC, "clk_isp_smmu_scalerc",
+ "mout_aclk_isp1_266",
+ EN_IP_ISP1, 25, 0, 0),
+ GATE(ISP_CLK_SMMU_SCALERP, "clk_isp_smmu_scalerp",
+ "mout_aclk_isp1_266",
+ EN_IP_ISP1, 26, 0, 0),
+ GATE(ISP_CLK_SPI0, "clk_isp_spi0", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 27, 0, 0),
+ GATE(ISP_CLK_SPI1, "clk_isp_spi1", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 28, 0, 0),
+ GATE(ISP_CLK_WDT, "clk_isp_wdt", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 31, 0, 0),
+ GATE(ISP_CLK_UART, "clk_isp_uart", "mout_aclk_isp1_266",
+ EN_IP_ISP1, 30, 0, 0),
+
+ GATE(ISP_SCLK_UART_EXT, "sclk_isp_uart_ext", "fin_pll",
+ EN_SCLK_ISP, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(ISP_SCLK_SPI1_EXT, "sclk_isp_spi1_ext", "fin_pll",
+ EN_SCLK_ISP, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(ISP_SCLK_SPI0_EXT, "sclk_isp_spi0_ext", "fin_pll",
+ EN_SCLK_ISP, 9, CLK_SET_RATE_PARENT, 0),
+};
+
+static void __init exynos5260_clk_isp_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = isp_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
+ cmu.div_clks = isp_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(isp_div_clks);
+ cmu.gate_clks = isp_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(isp_gate_clks);
+ cmu.nr_clk_ids = ISP_NR_CLK;
+ cmu.clk_regs = isp_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
+ exynos5260_clk_isp_init);
+
+
+/* CMU_KFC */
+
+static unsigned long kfc_clk_regs[] __initdata = {
+ KFC_PLL_LOCK,
+ KFC_PLL_CON0,
+ KFC_PLL_CON1,
+ KFC_PLL_FDET,
+ MUX_SEL_KFC0,
+ MUX_SEL_KFC2,
+ DIV_KFC,
+ DIV_KFC_PLL_FDET,
+ EN_ACLK_KFC,
+ EN_PCLK_KFC,
+ EN_SCLK_KFC,
+ EN_IP_KFC,
+};
+
+PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"};
+PNAME(mout_kfc_p) = {"mout_kfc_pll", "dout_media_pll"};
+
+struct samsung_mux_clock kfc_mux_clks[] __initdata = {
+ MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p,
+ MUX_SEL_KFC0, 0, 1),
+ MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1),
+};
+
+struct samsung_div_clock kfc_div_clks[] __initdata = {
+ DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3),
+ DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3),
+ DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3),
+ DIV(KFC_DOUT_KFC_PCLK_DBG, "dout_kfc_pclk_dbg", "dout_kfc2",
+ DIV_KFC, 12, 3),
+ DIV(KFC_DOUT_ACLK_KFC, "dout_aclk_kfc", "dout_kfc2", DIV_KFC, 16, 3),
+ DIV(KFC_DOUT_PCLK_KFC, "dout_pclk_kfc", "dout_kfc2", DIV_KFC, 20, 3),
+ DIV(KFC_DOUT_KFC_PLL, "dout_kfc_pll", "mout_kfc", DIV_KFC, 24, 3),
+};
+
+static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
+ PLL(pll_2550xx, KFC_FOUT_KFC_PLL, "fout_kfc_pll", "fin_pll",
+ KFC_PLL_LOCK, KFC_PLL_CON0,
+ pll2550_24mhz_tbl),
+};
+
+static void __init exynos5260_clk_kfc_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.pll_clks = kfc_pll_clks;
+ cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks);
+ cmu.mux_clks = kfc_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(kfc_mux_clks);
+ cmu.div_clks = kfc_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(kfc_div_clks);
+ cmu.nr_clk_ids = KFC_NR_CLK;
+ cmu.clk_regs = kfc_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
+ exynos5260_clk_kfc_init);
+
+
+/* CMU_MFC */
+
+static unsigned long mfc_clk_regs[] __initdata = {
+ MUX_SEL_MFC,
+ DIV_MFC,
+ EN_ACLK_MFC,
+ EN_ACLK_SECURE_SMMU2_MFC,
+ EN_PCLK_MFC,
+ EN_PCLK_SECURE_SMMU2_MFC,
+ EN_IP_MFC,
+ EN_IP_MFC_SECURE_SMMU2_MFC,
+};
+
+PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"};
+
+struct samsung_mux_clock mfc_mux_clks[] __initdata = {
+ MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user",
+ mout_aclk_mfc_333_user_p,
+ MUX_SEL_MFC, 0, 1),
+};
+
+struct samsung_div_clock mfc_div_clks[] __initdata = {
+ DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user",
+ DIV_MFC, 0, 3),
+};
+
+struct samsung_gate_clock mfc_gate_clks[] __initdata = {
+ GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user",
+ EN_IP_MFC, 1, 0, 0),
+ GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user",
+ EN_IP_MFC_SECURE_SMMU2_MFC, 6, 0, 0),
+ GATE(MFC_CLK_SMMU2_MFCM1, "clk_smmu2_mfcm1", "mout_aclk_mfc_333_user",
+ EN_IP_MFC_SECURE_SMMU2_MFC, 7, 0, 0),
+};
+
+static void __init exynos5260_clk_mfc_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = mfc_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
+ cmu.div_clks = mfc_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(mfc_div_clks);
+ cmu.gate_clks = mfc_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(mfc_gate_clks);
+ cmu.nr_clk_ids = MFC_NR_CLK;
+ cmu.clk_regs = mfc_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
+ exynos5260_clk_mfc_init);
+
+
+/* CMU_MIF */
+
+static unsigned long mif_clk_regs[] __initdata = {
+ MEM_PLL_LOCK,
+ BUS_PLL_LOCK,
+ MEDIA_PLL_LOCK,
+ MEM_PLL_CON0,
+ MEM_PLL_CON1,
+ MEM_PLL_FDET,
+ BUS_PLL_CON0,
+ BUS_PLL_CON1,
+ BUS_PLL_FDET,
+ MEDIA_PLL_CON0,
+ MEDIA_PLL_CON1,
+ MEDIA_PLL_FDET,
+ MUX_SEL_MIF,
+ DIV_MIF,
+ DIV_MIF_PLL_FDET,
+ EN_ACLK_MIF,
+ EN_ACLK_MIF_SECURE_DREX1_TZ,
+ EN_ACLK_MIF_SECURE_DREX0_TZ,
+ EN_ACLK_MIF_SECURE_INTMEM,
+ EN_PCLK_MIF,
+ EN_PCLK_MIF_SECURE_MONOCNT,
+ EN_PCLK_MIF_SECURE_RTC_APBIF,
+ EN_PCLK_MIF_SECURE_DREX1_TZ,
+ EN_PCLK_MIF_SECURE_DREX0_TZ,
+ EN_SCLK_MIF,
+ EN_IP_MIF,
+ EN_IP_MIF_SECURE_MONOCNT,
+ EN_IP_MIF_SECURE_RTC_APBIF,
+ EN_IP_MIF_SECURE_DREX1_TZ,
+ EN_IP_MIF_SECURE_DREX0_TZ,
+ EN_IP_MIF_SECURE_INTEMEM,
+};
+
+PNAME(mout_mem_pll_p) = {"fin_pll", "fout_mem_pll"};
+PNAME(mout_bus_pll_p) = {"fin_pll", "fout_bus_pll"};
+PNAME(mout_media_pll_p) = {"fin_pll", "fout_media_pll"};
+PNAME(mout_mif_drex_p) = {"dout_mem_pll", "dout_bus_pll"};
+PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"};
+PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"};
+PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"};
+
+struct samsung_mux_clock mif_mux_clks[] __initdata = {
+ MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p,
+ MUX_SEL_MIF, 0, 1),
+ MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p,
+ MUX_SEL_MIF, 4, 1),
+ MUX(MIF_MOUT_MEDIA_PLL, "mout_media_pll", mout_media_pll_p,
+ MUX_SEL_MIF, 8, 1),
+ MUX(MIF_MOUT_MIF_DREX, "mout_mif_drex", mout_mif_drex_p,
+ MUX_SEL_MIF, 12, 1),
+ MUX(MIF_MOUT_CLKM_PHY, "mout_clkm_phy", mout_clkm_phy_p,
+ MUX_SEL_MIF, 16, 1),
+ MUX(MIF_MOUT_MIF_DREX2X, "mout_mif_drex2x", mout_mif_drex2x_p,
+ MUX_SEL_MIF, 20, 1),
+ MUX(MIF_MOUT_CLK2X_PHY, "mout_clk2x_phy", mout_clk2x_phy_p,
+ MUX_SEL_MIF, 24, 1),
+};
+
+struct samsung_div_clock mif_div_clks[] __initdata = {
+ DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll",
+ DIV_MIF, 0, 3),
+ DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll",
+ DIV_MIF, 4, 3),
+ DIV(MIF_DOUT_BUS_PLL, "dout_bus_pll", "mout_bus_pll",
+ DIV_MIF, 8, 3),
+ DIV(MIF_DOUT_CLKM_PHY, "dout_clkm_phy", "mout_clkm_phy",
+ DIV_MIF, 12, 3),
+ DIV(MIF_DOUT_CLK2X_PHY, "dout_clk2x_phy", "mout_clk2x_phy",
+ DIV_MIF, 16, 4),
+ DIV(MIF_DOUT_ACLK_MIF_466, "dout_aclk_mif_466", "dout_clk2x_phy",
+ DIV_MIF, 20, 3),
+ DIV(MIF_DOUT_ACLK_BUS_200, "dout_aclk_bus_200", "dout_bus_pll",
+ DIV_MIF, 24, 3),
+ DIV(MIF_DOUT_ACLK_BUS_100, "dout_aclk_bus_100", "dout_bus_pll",
+ DIV_MIF, 28, 4),
+};
+
+struct samsung_gate_clock mif_gate_clks[] __initdata = {
+ GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy",
+ EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy",
+ EN_IP_MIF, 13, CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_CLK_MONOCNT, "clk_monocnt", "dout_aclk_bus_100",
+ EN_IP_MIF_SECURE_MONOCNT, 22,
+ CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_CLK_MIF_RTC, "clk_mif_rtc", "dout_aclk_bus_100",
+ EN_IP_MIF_SECURE_RTC_APBIF, 23,
+ CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_CLK_DREX1, "clk_drex1", "dout_aclk_mif_466",
+ EN_IP_MIF_SECURE_DREX1_TZ, 9,
+ CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_CLK_DREX0, "clk_drex0", "dout_aclk_mif_466",
+ EN_IP_MIF_SECURE_DREX0_TZ, 9,
+ CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_CLK_INTMEM, "clk_intmem", "dout_aclk_bus_200",
+ EN_IP_MIF_SECURE_INTEMEM, 11,
+ CLK_IGNORE_UNUSED, 0),
+
+ GATE(MIF_SCLK_LPDDR3PHY_WRAP_U0, "sclk_lpddr3phy_wrap_u0",
+ "dout_clkm_phy", EN_SCLK_MIF, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(MIF_SCLK_LPDDR3PHY_WRAP_U1, "sclk_lpddr3phy_wrap_u1",
+ "dout_clkm_phy", EN_SCLK_MIF, 1,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+};
+
+static struct samsung_pll_clock mif_pll_clks[] __initdata = {
+ PLL(pll_2550xx, MIF_FOUT_MEM_PLL, "fout_mem_pll", "fin_pll",
+ MEM_PLL_LOCK, MEM_PLL_CON0,
+ pll2550_24mhz_tbl),
+ PLL(pll_2550xx, MIF_FOUT_BUS_PLL, "fout_bus_pll", "fin_pll",
+ BUS_PLL_LOCK, BUS_PLL_CON0,
+ pll2550_24mhz_tbl),
+ PLL(pll_2550xx, MIF_FOUT_MEDIA_PLL, "fout_media_pll", "fin_pll",
+ MEDIA_PLL_LOCK, MEDIA_PLL_CON0,
+ pll2550_24mhz_tbl),
+};
+
+static void __init exynos5260_clk_mif_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.pll_clks = mif_pll_clks;
+ cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks);
+ cmu.mux_clks = mif_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(mif_mux_clks);
+ cmu.div_clks = mif_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(mif_div_clks);
+ cmu.gate_clks = mif_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(mif_gate_clks);
+ cmu.nr_clk_ids = MIF_NR_CLK;
+ cmu.clk_regs = mif_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
+ exynos5260_clk_mif_init);
+
+
+/* CMU_PERI */
+
+static unsigned long peri_clk_regs[] __initdata = {
+ MUX_SEL_PERI,
+ MUX_SEL_PERI1,
+ DIV_PERI,
+ EN_PCLK_PERI0,
+ EN_PCLK_PERI1,
+ EN_PCLK_PERI2,
+ EN_PCLK_PERI3,
+ EN_PCLK_PERI_SECURE_CHIPID,
+ EN_PCLK_PERI_SECURE_PROVKEY0,
+ EN_PCLK_PERI_SECURE_PROVKEY1,
+ EN_PCLK_PERI_SECURE_SECKEY,
+ EN_PCLK_PERI_SECURE_ANTIRBKCNT,
+ EN_PCLK_PERI_SECURE_TOP_RTC,
+ EN_PCLK_PERI_SECURE_TZPC,
+ EN_SCLK_PERI,
+ EN_SCLK_PERI_SECURE_TOP_RTC,
+ EN_IP_PERI0,
+ EN_IP_PERI1,
+ EN_IP_PERI2,
+ EN_IP_PERI_SECURE_CHIPID,
+ EN_IP_PERI_SECURE_PROVKEY0,
+ EN_IP_PERI_SECURE_PROVKEY1,
+ EN_IP_PERI_SECURE_SECKEY,
+ EN_IP_PERI_SECURE_ANTIRBKCNT,
+ EN_IP_PERI_SECURE_TOP_RTC,
+ EN_IP_PERI_SECURE_TZPC,
+};
+
+PNAME(mout_sclk_pcm_p) = {"ioclk_pcm_extclk", "fin_pll", "dout_aclk_peri_aud",
+ "phyclk_hdmi_phy_ref_cko"};
+PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud",
+ "phyclk_hdmi_phy_ref_cko"};
+PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll",
+ "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
+
+struct samsung_mux_clock peri_mux_clks[] __initdata = {
+ MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p,
+ MUX_SEL_PERI1, 4, 2),
+ MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p,
+ MUX_SEL_PERI1, 12, 2),
+ MUX(PERI_MOUT_SCLK_SPDIF, "mout_sclk_spdif", mout_sclk_spdif_p,
+ MUX_SEL_PERI1, 20, 2),
+};
+
+struct samsung_div_clock peri_div_clks[] __initdata = {
+ DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8),
+ DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6),
+};
+
+struct samsung_gate_clock peri_gate_clks[] __initdata = {
+ GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_SPDIF, "sclk_spdif", "dout_sclk_peri_spi0_b",
+ EN_SCLK_PERI, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_SPI0, "sclk_spi0", "dout_sclk_peri_spi0_b",
+ EN_SCLK_PERI, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_SPI1, "sclk_spi1", "dout_sclk_peri_spi1_b",
+ EN_SCLK_PERI, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_SPI2, "sclk_spi2", "dout_sclk_peri_spi2_b",
+ EN_SCLK_PERI, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_UART0, "sclk_uart0", "dout_sclk_peri_uart0",
+ EN_SCLK_PERI, 10, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_UART1, "sclk_uart1", "dout_sclk_peri_uart1",
+ EN_SCLK_PERI, 11, CLK_SET_RATE_PARENT, 0),
+ GATE(PERI_SCLK_UART2, "sclk_uart2", "dout_sclk_peri_uart2",
+ EN_SCLK_PERI, 12, CLK_SET_RATE_PARENT, 0),
+
+ GATE(PERI_CLK_ABB, "clk_abb", "dout_aclk_peri_66",
+ EN_IP_PERI0, 1, 0, 0),
+ GATE(PERI_CLK_EFUSE_WRITER, "clk_efuse_writer", "dout_aclk_peri_66",
+ EN_IP_PERI0, 5, 0, 0),
+ GATE(PERI_CLK_HDMICEC, "clk_hdmicec", "dout_aclk_peri_66",
+ EN_IP_PERI0, 6, 0, 0),
+ GATE(PERI_CLK_I2C10, "clk_i2c10", "dout_aclk_peri_66",
+ EN_IP_PERI0, 7, 0, 0),
+ GATE(PERI_CLK_I2C11, "clk_i2c11", "dout_aclk_peri_66",
+ EN_IP_PERI0, 8, 0, 0),
+ GATE(PERI_CLK_I2C8, "clk_i2c8", "dout_aclk_peri_66",
+ EN_IP_PERI0, 9, 0, 0),
+ GATE(PERI_CLK_I2C9, "clk_i2c9", "dout_aclk_peri_66",
+ EN_IP_PERI0, 10, 0, 0),
+ GATE(PERI_CLK_I2C4, "clk_i2c4", "dout_aclk_peri_66",
+ EN_IP_PERI0, 11, 0, 0),
+ GATE(PERI_CLK_I2C5, "clk_i2c5", "dout_aclk_peri_66",
+ EN_IP_PERI0, 12, 0, 0),
+ GATE(PERI_CLK_I2C6, "clk_i2c6", "dout_aclk_peri_66",
+ EN_IP_PERI0, 13, 0, 0),
+ GATE(PERI_CLK_I2C7, "clk_i2c7", "dout_aclk_peri_66",
+ EN_IP_PERI0, 14, 0, 0),
+ GATE(PERI_CLK_I2CHDMI, "clk_i2chdmi", "dout_aclk_peri_66",
+ EN_IP_PERI0, 15, 0, 0),
+ GATE(PERI_CLK_I2S, "clk_peri_i2s", "dout_aclk_peri_66",
+ EN_IP_PERI0, 16, 0, 0),
+ GATE(PERI_CLK_MCT, "clk_mct", "dout_aclk_peri_66",
+ EN_IP_PERI0, 17, 0, 0),
+ GATE(PERI_CLK_PCM, "clk_peri_pcm", "dout_aclk_peri_66",
+ EN_IP_PERI0, 18, 0, 0),
+ GATE(PERI_CLK_HSIC0, "clk_hsic0", "dout_aclk_peri_66",
+ EN_IP_PERI0, 20, 0, 0),
+ GATE(PERI_CLK_HSIC1, "clk_hsic1", "dout_aclk_peri_66",
+ EN_IP_PERI0, 21, 0, 0),
+ GATE(PERI_CLK_HSIC2, "clk_hsic2", "dout_aclk_peri_66",
+ EN_IP_PERI0, 22, 0, 0),
+ GATE(PERI_CLK_HSIC3, "clk_hsic3", "dout_aclk_peri_66",
+ EN_IP_PERI0, 23, 0, 0),
+ GATE(PERI_CLK_WDT_EGL, "clk_wdt_egl", "dout_aclk_peri_66",
+ EN_IP_PERI0, 24, 0, 0),
+ GATE(PERI_CLK_WDT_KFC, "clk_wdt_kfc", "dout_aclk_peri_66",
+ EN_IP_PERI0, 25, 0, 0),
+
+ GATE(PERI_CLK_UART4, "clk_uart4", "dout_aclk_peri_66",
+ EN_IP_PERI2, 0, 0, 0),
+ GATE(PERI_CLK_PWM, "clk_pwm", "dout_aclk_peri_66",
+ EN_IP_PERI2, 3, 0, 0),
+ GATE(PERI_CLK_SPDIF, "clk_spdif", "dout_aclk_peri_66",
+ EN_IP_PERI2, 6, 0, 0),
+ GATE(PERI_CLK_SPI0, "clk_spi0", "dout_aclk_peri_66",
+ EN_IP_PERI2, 7, 0, 0),
+ GATE(PERI_CLK_SPI1, "clk_spi1", "dout_aclk_peri_66",
+ EN_IP_PERI2, 8, 0, 0),
+ GATE(PERI_CLK_SPI2, "clk_spi2", "dout_aclk_peri_66",
+ EN_IP_PERI2, 9, 0, 0),
+ GATE(PERI_CLK_TMU0, "clk_tmu0", "dout_aclk_peri_66",
+ EN_IP_PERI2, 10, 0, 0),
+ GATE(PERI_CLK_TMU1, "clk_tmu1", "dout_aclk_peri_66",
+ EN_IP_PERI2, 11, 0, 0),
+ GATE(PERI_CLK_TMU2, "clk_tmu2", "dout_aclk_peri_66",
+ EN_IP_PERI2, 12, 0, 0),
+ GATE(PERI_CLK_TMU3, "clk_tmu3", "dout_aclk_peri_66",
+ EN_IP_PERI2, 13, 0, 0),
+ GATE(PERI_CLK_TMU4, "clk_tmu4", "dout_aclk_peri_66",
+ EN_IP_PERI2, 14, 0, 0),
+ GATE(PERI_CLK_ADC, "clk_adc", "dout_aclk_peri_66",
+ EN_IP_PERI2, 18, 0, 0),
+ GATE(PERI_CLK_UART0, "clk_uart0", "dout_aclk_peri_66",
+ EN_IP_PERI2, 19, 0, 0),
+ GATE(PERI_CLK_UART1, "clk_uart1", "dout_aclk_peri_66",
+ EN_IP_PERI2, 20, 0, 0),
+ GATE(PERI_CLK_UART2, "clk_uart2", "dout_aclk_peri_66",
+ EN_IP_PERI2, 21, 0, 0),
+
+ GATE(PERI_CLK_CHIPID, "clk_chipid", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_CHIPID, 2, 0, 0),
+
+ GATE(PERI_CLK_PROVKEY0, "clk_provkey0", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_PROVKEY0, 1, 0, 0),
+
+ GATE(PERI_CLK_PROVKEY1, "clk_provkey1", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_PROVKEY1, 2, 0, 0),
+
+ GATE(PERI_CLK_SECKEY, "clk_seckey", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_SECKEY, 5, 0, 0),
+
+ GATE(PERI_CLK_TOP_RTC, "clk_top_rtc", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TOP_RTC, 5, 0, 0),
+
+ GATE(PERI_CLK_TZPC0, "clk_tzpc0", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 10, 0, 0),
+ GATE(PERI_CLK_TZPC1, "clk_tzpc1", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 11, 0, 0),
+ GATE(PERI_CLK_TZPC2, "clk_tzpc2", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 12, 0, 0),
+ GATE(PERI_CLK_TZPC3, "clk_tzpc3", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 13, 0, 0),
+ GATE(PERI_CLK_TZPC4, "clk_tzpc4", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 14, 0, 0),
+ GATE(PERI_CLK_TZPC5, "clk_tzpc5", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 15, 0, 0),
+ GATE(PERI_CLK_TZPC6, "clk_tzpc6", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 16, 0, 0),
+ GATE(PERI_CLK_TZPC7, "clk_tzpc7", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 17, 0, 0),
+ GATE(PERI_CLK_TZPC8, "clk_tzpc8", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 18, 0, 0),
+ GATE(PERI_CLK_TZPC9, "clk_tzpc9", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 19, 0, 0),
+ GATE(PERI_CLK_TZPC10, "clk_tzpc10", "dout_aclk_peri_66",
+ EN_IP_PERI_SECURE_TZPC, 20, 0, 0),
+};
+
+static void __init exynos5260_clk_peri_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.mux_clks = peri_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
+ cmu.div_clks = peri_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(peri_div_clks);
+ cmu.gate_clks = peri_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(peri_gate_clks);
+ cmu.nr_clk_ids = PERI_NR_CLK;
+ cmu.clk_regs = peri_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
+ exynos5260_clk_peri_init);
+
+
+/* CMU_TOP */
+
+static unsigned long top_clk_regs[] __initdata = {
+ DISP_PLL_LOCK,
+ AUD_PLL_LOCK,
+ DISP_PLL_CON0,
+ DISP_PLL_CON1,
+ DISP_PLL_FDET,
+ AUD_PLL_CON0,
+ AUD_PLL_CON1,
+ AUD_PLL_CON2,
+ AUD_PLL_FDET,
+ MUX_SEL_TOP_PLL0,
+ MUX_SEL_TOP_MFC,
+ MUX_SEL_TOP_G2D,
+ MUX_SEL_TOP_GSCL,
+ MUX_SEL_TOP_ISP10,
+ MUX_SEL_TOP_ISP11,
+ MUX_SEL_TOP_DISP0,
+ MUX_SEL_TOP_DISP1,
+ MUX_SEL_TOP_BUS,
+ MUX_SEL_TOP_PERI0,
+ MUX_SEL_TOP_PERI1,
+ MUX_SEL_TOP_FSYS,
+ DIV_TOP_G2D_MFC,
+ DIV_TOP_GSCL_ISP0,
+ DIV_TOP_ISP10,
+ DIV_TOP_ISP11,
+ DIV_TOP_DISP,
+ DIV_TOP_BUS,
+ DIV_TOP_PERI0,
+ DIV_TOP_PERI1,
+ DIV_TOP_PERI2,
+ DIV_TOP_FSYS0,
+ DIV_TOP_FSYS1,
+ DIV_TOP_HPM,
+ DIV_TOP_PLL_FDET,
+ EN_ACLK_TOP,
+ EN_SCLK_TOP,
+ EN_IP_TOP,
+};
+
+/* fixed rate clocks generated inside the soc */
+struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
+ FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL,
+ CLK_IS_ROOT, 270000000),
+ FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL,
+ CLK_IS_ROOT, 270000000),
+ FRATE(PHYCLK_DPTX_PHY_CH1_TXD_CLK, "phyclk_dptx_phy_ch1_txd_clk", NULL,
+ CLK_IS_ROOT, 270000000),
+ FRATE(PHYCLK_DPTX_PHY_CH0_TXD_CLK, "phyclk_dptx_phy_ch0_txd_clk", NULL,
+ CLK_IS_ROOT, 270000000),
+ FRATE(phyclk_hdmi_phy_tmds_clko, "phyclk_hdmi_phy_tmds_clko", NULL,
+ CLK_IS_ROOT, 250000000),
+ FRATE(PHYCLK_HDMI_PHY_PIXEL_CLKO, "phyclk_hdmi_phy_pixel_clko", NULL,
+ CLK_IS_ROOT, 1660000000),
+ FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
+ NULL, CLK_IS_ROOT, 125000000),
+ FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
+ "phyclk_mipi_dphy_4l_m_txbyteclkhs" , NULL,
+ CLK_IS_ROOT, 187500000),
+ FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
+ NULL, CLK_IS_ROOT, 24000000),
+ FRATE(PHYCLK_DPTX_PHY_CLK_DIV2, "phyclk_dptx_phy_clk_div2", NULL,
+ CLK_IS_ROOT, 135000000),
+ FRATE(PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0,
+ "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL,
+ CLK_IS_ROOT, 20000000),
+ FRATE(PHYCLK_USBHOST20_PHY_PHYCLOCK, "phyclk_usbhost20_phy_phyclock",
+ NULL, CLK_IS_ROOT, 60000000),
+ FRATE(PHYCLK_USBHOST20_PHY_FREECLK, "phyclk_usbhost20_phy_freeclk",
+ NULL, CLK_IS_ROOT, 60000000),
+ FRATE(PHYCLK_USBHOST20_PHY_CLK48MOHCI,
+ "phyclk_usbhost20_phy_clk48mohci",
+ NULL, CLK_IS_ROOT, 48000000),
+ FRATE(PHYCLK_USBDRD30_UDRD30_PIPE_PCLK,
+ "phyclk_usbdrd30_udrd30_pipe_pclk", NULL,
+ CLK_IS_ROOT, 125000000),
+ FRATE(PHYCLK_USBDRD30_UDRD30_PHYCLOCK,
+ "phyclk_usbdrd30_udrd30_phyclock", NULL,
+ CLK_IS_ROOT, 60000000),
+};
+
+PNAME(mout_memtop_pll_user_p) = {"fin_pll", "dout_mem_pll"};
+PNAME(mout_bustop_pll_user_p) = {"fin_pll", "dout_bus_pll"};
+PNAME(mout_mediatop_pll_user_p) = {"fin_pll", "dout_media_pll"};
+PNAME(mout_audtop_pll_user_p) = {"fin_pll", "mout_aud_pll"};
+PNAME(mout_aud_pll_p) = {"fin_pll", "fout_aud_pll"};
+PNAME(mout_disp_pll_p) = {"fin_pll", "fout_disp_pll"};
+PNAME(mout_mfc_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_mfc_333_p) = {"mout_mediatop_pll_user", "mout_mfc_bustop_333"};
+PNAME(mout_g2d_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_g2d_333_p) = {"mout_mediatop_pll_user", "mout_g2d_bustop_333"};
+PNAME(mout_gscl_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_gscl_333_p) = {"mout_mediatop_pll_user",
+ "mout_gscl_bustop_333"};
+PNAME(mout_m2m_mediatop_400_p) = {"mout_mediatop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_gscl_400_p) = {"mout_bustop_pll_user",
+ "mout_m2m_mediatop_400"};
+PNAME(mout_gscl_bustop_fimc_p) = {"mout_bustop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_gscl_fimc_p) = {"mout_mediatop_pll_user",
+ "mout_gscl_bustop_fimc"};
+PNAME(mout_isp1_media_266_p) = {"mout_mediatop_pll_user",
+ "mout_memtop_pll_user"};
+PNAME(mout_aclk_isp1_266_p) = {"mout_bustop_pll_user", "mout_isp1_media_266"};
+PNAME(mout_isp1_media_400_p) = {"mout_mediatop_pll_user", "mout_disp_pll"};
+PNAME(mout_aclk_isp1_400_p) = {"mout_bustop_pll_user", "mout_isp1_media_400"};
+PNAME(mout_sclk_isp_spi_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_isp_uart_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_isp_sensor_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_disp_disp_333_p) = {"mout_disp_pll", "mout_bustop_pll_user"};
+PNAME(mout_aclk_disp_333_p) = {"mout_mediatop_pll_user", "mout_disp_disp_333"};
+PNAME(mout_disp_disp_222_p) = {"mout_disp_pll", "mout_bustop_pll_user"};
+PNAME(mout_aclk_disp_222_p) = {"mout_mediatop_pll_user", "mout_disp_disp_222"};
+PNAME(mout_disp_media_pixel_p) = {"mout_mediatop_pll_user",
+ "mout_bustop_pll_user"};
+PNAME(mout_sclk_disp_pixel_p) = {"mout_disp_pll", "mout_disp_media_pixel"};
+PNAME(mout_bus_bustop_400_p) = {"mout_bustop_pll_user", "mout_memtop_pll_user"};
+PNAME(mout_bus_bustop_100_p) = {"mout_bustop_pll_user", "mout_memtop_pll_user"};
+PNAME(mout_sclk_peri_spi_clk_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_peri_uart_uclk_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_fsys_usb_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_fsys_mmc_sdclkin_a_p) = {"fin_pll", "mout_bustop_pll_user"};
+PNAME(mout_sclk_fsys_mmc0_sdclkin_b_p) = {"mout_sclk_fsys_mmc0_sdclkin_a",
+ "mout_mediatop_pll_user"};
+PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = {"mout_sclk_fsys_mmc1_sdclkin_a",
+ "mout_mediatop_pll_user"};
+PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a",
+ "mout_mediatop_pll_user"};
+
+struct samsung_mux_clock top_mux_clks[] __initdata = {
+ MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user",
+ mout_mediatop_pll_user_p,
+ MUX_SEL_TOP_PLL0, 0, 1),
+ MUX(TOP_MOUT_MEMTOP_PLL_USER, "mout_memtop_pll_user",
+ mout_memtop_pll_user_p,
+ MUX_SEL_TOP_PLL0, 4, 1),
+ MUX(TOP_MOUT_BUSTOP_PLL_USER, "mout_bustop_pll_user",
+ mout_bustop_pll_user_p,
+ MUX_SEL_TOP_PLL0, 8, 1),
+ MUX(TOP_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p,
+ MUX_SEL_TOP_PLL0, 12, 1),
+ MUX(TOP_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p,
+ MUX_SEL_TOP_PLL0, 16, 1),
+ MUX(TOP_MOUT_AUDTOP_PLL_USER, "mout_audtop_pll_user",
+ mout_audtop_pll_user_p,
+ MUX_SEL_TOP_PLL0, 24, 1),
+
+ MUX(TOP_MOUT_DISP_DISP_333, "mout_disp_disp_333", mout_disp_disp_333_p,
+ MUX_SEL_TOP_DISP0, 0, 1),
+ MUX(TOP_MOUT_ACLK_DISP_333, "mout_aclk_disp_333", mout_aclk_disp_333_p,
+ MUX_SEL_TOP_DISP0, 8, 1),
+ MUX(TOP_MOUT_DISP_DISP_222, "mout_disp_disp_222", mout_disp_disp_222_p,
+ MUX_SEL_TOP_DISP0, 12, 1),
+ MUX(TOP_MOUT_ACLK_DISP_222, "mout_aclk_disp_222", mout_aclk_disp_222_p,
+ MUX_SEL_TOP_DISP0, 20, 1),
+
+ MUX(TOP_MOUT_FIMD1, "mout_sclk_disp_pixel", mout_sclk_disp_pixel_p,
+ MUX_SEL_TOP_DISP1, 0, 1),
+ MUX(TOP_MOUT_DISP_MEDIA_PIXEL, "mout_disp_media_pixel",
+ mout_disp_media_pixel_p,
+ MUX_SEL_TOP_DISP1, 8, 1),
+
+ MUX(TOP_MOUT_SCLK_PERI_SPI2_CLK, "mout_sclk_peri_spi2_clk",
+ mout_sclk_peri_spi_clk_p,
+ MUX_SEL_TOP_PERI1, 0, 1),
+ MUX(TOP_MOUT_SCLK_PERI_SPI1_CLK, "mout_sclk_peri_spi1_clk",
+ mout_sclk_peri_spi_clk_p,
+ MUX_SEL_TOP_PERI1, 4, 1),
+ MUX(TOP_MOUT_SCLK_PERI_SPI0_CLK, "mout_sclk_peri_spi0_clk",
+ mout_sclk_peri_spi_clk_p,
+ MUX_SEL_TOP_PERI1, 8, 1),
+ MUX(TOP_MOUT_SCLK_PERI_UART1_UCLK, "mout_sclk_peri_uart1_uclk",
+ mout_sclk_peri_uart_uclk_p,
+ MUX_SEL_TOP_PERI1, 12, 1),
+ MUX(TOP_MOUT_SCLK_PERI_UART2_UCLK, "mout_sclk_peri_uart2_uclk",
+ mout_sclk_peri_uart_uclk_p,
+ MUX_SEL_TOP_PERI1, 16, 1),
+ MUX(TOP_MOUT_SCLK_PERI_UART0_UCLK, "mout_sclk_peri_uart0_uclk",
+ mout_sclk_peri_uart_uclk_p,
+ MUX_SEL_TOP_PERI1, 20, 1),
+
+
+ MUX(TOP_MOUT_BUS1_BUSTOP_400, "mout_bus1_bustop_400",
+ mout_bus_bustop_400_p,
+ MUX_SEL_TOP_BUS, 0, 1),
+ MUX(TOP_MOUT_BUS1_BUSTOP_100, "mout_bus1_bustop_100",
+ mout_bus_bustop_100_p,
+ MUX_SEL_TOP_BUS, 4, 1),
+ MUX(TOP_MOUT_BUS2_BUSTOP_100, "mout_bus2_bustop_100",
+ mout_bus_bustop_100_p,
+ MUX_SEL_TOP_BUS, 8, 1),
+ MUX(TOP_MOUT_BUS2_BUSTOP_400, "mout_bus2_bustop_400",
+ mout_bus_bustop_400_p,
+ MUX_SEL_TOP_BUS, 12, 1),
+ MUX(TOP_MOUT_BUS3_BUSTOP_400, "mout_bus3_bustop_400",
+ mout_bus_bustop_400_p,
+ MUX_SEL_TOP_BUS, 16, 1),
+ MUX(TOP_MOUT_BUS3_BUSTOP_100, "mout_bus3_bustop_100",
+ mout_bus_bustop_100_p,
+ MUX_SEL_TOP_BUS, 20, 1),
+ MUX(TOP_MOUT_BUS4_BUSTOP_400, "mout_bus4_bustop_400",
+ mout_bus_bustop_400_p,
+ MUX_SEL_TOP_BUS, 24, 1),
+ MUX(TOP_MOUT_BUS4_BUSTOP_100, "mout_bus4_bustop_100",
+ mout_bus_bustop_100_p,
+ MUX_SEL_TOP_BUS, 28, 1),
+
+ MUX(TOP_MOUT_SCLK_FSYS_USB, "mout_sclk_fsys_usb",
+ mout_sclk_fsys_usb_p,
+ MUX_SEL_TOP_FSYS, 0, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A, "mout_sclk_fsys_mmc2_sdclkin_a",
+ mout_sclk_fsys_mmc_sdclkin_a_p,
+ MUX_SEL_TOP_FSYS, 4, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_B, "mout_sclk_fsys_mmc2_sdclkin_b",
+ mout_sclk_fsys_mmc2_sdclkin_b_p,
+ MUX_SEL_TOP_FSYS, 8, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A, "mout_sclk_fsys_mmc1_sdclkin_a",
+ mout_sclk_fsys_mmc_sdclkin_a_p,
+ MUX_SEL_TOP_FSYS, 12, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_B, "mout_sclk_fsys_mmc1_sdclkin_b",
+ mout_sclk_fsys_mmc1_sdclkin_b_p,
+ MUX_SEL_TOP_FSYS, 16, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A, "mout_sclk_fsys_mmc0_sdclkin_a",
+ mout_sclk_fsys_mmc_sdclkin_a_p,
+ MUX_SEL_TOP_FSYS, 20, 1),
+ MUX(TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_B, "mout_sclk_fsys_mmc0_sdclkin_b",
+ mout_sclk_fsys_mmc0_sdclkin_b_p,
+ MUX_SEL_TOP_FSYS, 24, 1),
+
+ MUX(TOP_MOUT_ISP1_MEDIA_400, "mout_isp1_media_400",
+ mout_isp1_media_400_p,
+ MUX_SEL_TOP_ISP10, 4, 1),
+ MUX(TOP_MOUT_ACLK_ISP1_400, "mout_aclk_isp1_400", mout_aclk_isp1_400_p,
+ MUX_SEL_TOP_ISP10, 8, 1),
+ MUX(TOP_MOUT_ISP1_MEDIA_266, "mout_isp1_media_266",
+ mout_isp1_media_266_p,
+ MUX_SEL_TOP_ISP10, 16, 1),
+ MUX(TOP_MOUT_ACLK_ISP1_266, "mout_aclk_isp1_266", mout_aclk_isp1_266_p,
+ MUX_SEL_TOP_ISP10, 20, 1),
+
+ MUX(TOP_MOUT_SCLK_ISP1_SPI0, "mout_sclk_isp1_spi0", mout_sclk_isp_spi_p,
+ MUX_SEL_TOP_ISP11, 4, 1),
+ MUX(TOP_MOUT_SCLK_ISP1_SPI1, "mout_sclk_isp1_spi1", mout_sclk_isp_spi_p,
+ MUX_SEL_TOP_ISP11, 8, 1),
+ MUX(TOP_MOUT_SCLK_ISP1_UART, "mout_sclk_isp1_uart",
+ mout_sclk_isp_uart_p,
+ MUX_SEL_TOP_ISP11, 12, 1),
+ MUX(TOP_MOUT_SCLK_ISP1_SENSOR0, "mout_sclk_isp1_sensor0",
+ mout_sclk_isp_sensor_p,
+ MUX_SEL_TOP_ISP11, 16, 1),
+ MUX(TOP_MOUT_SCLK_ISP1_SENSOR1, "mout_sclk_isp1_sensor1",
+ mout_sclk_isp_sensor_p,
+ MUX_SEL_TOP_ISP11, 20, 1),
+ MUX(TOP_MOUT_SCLK_ISP1_SENSOR2, "mout_sclk_isp1_sensor2",
+ mout_sclk_isp_sensor_p,
+ MUX_SEL_TOP_ISP11, 24, 1),
+
+ MUX(TOP_MOUT_MFC_BUSTOP_333, "mout_mfc_bustop_333",
+ mout_mfc_bustop_333_p,
+ MUX_SEL_TOP_MFC, 4, 1),
+ MUX(TOP_MOUT_ACLK_MFC_333, "mout_aclk_mfc_333", mout_aclk_mfc_333_p,
+ MUX_SEL_TOP_MFC, 8, 1),
+
+ MUX(TOP_MOUT_G2D_BUSTOP_333, "mout_g2d_bustop_333",
+ mout_g2d_bustop_333_p,
+ MUX_SEL_TOP_G2D, 4, 1),
+ MUX(TOP_MOUT_ACLK_G2D_333, "mout_aclk_g2d_333", mout_aclk_g2d_333_p,
+ MUX_SEL_TOP_G2D, 8, 1),
+
+ MUX(TOP_MOUT_M2M_MEDIATOP_400, "mout_m2m_mediatop_400",
+ mout_m2m_mediatop_400_p,
+ MUX_SEL_TOP_GSCL, 0, 1),
+ MUX(TOP_MOUT_ACLK_GSCL_400, "mout_aclk_gscl_400",
+ mout_aclk_gscl_400_p,
+ MUX_SEL_TOP_GSCL, 4, 1),
+ MUX(TOP_MOUT_GSCL_BUSTOP_333, "mout_gscl_bustop_333",
+ mout_gscl_bustop_333_p,
+ MUX_SEL_TOP_GSCL, 8, 1),
+ MUX(TOP_MOUT_ACLK_GSCL_333, "mout_aclk_gscl_333",
+ mout_aclk_gscl_333_p,
+ MUX_SEL_TOP_GSCL, 12, 1),
+ MUX(TOP_MOUT_GSCL_BUSTOP_FIMC, "mout_gscl_bustop_fimc",
+ mout_gscl_bustop_fimc_p,
+ MUX_SEL_TOP_GSCL, 16, 1),
+ MUX(TOP_MOUT_ACLK_GSCL_FIMC, "mout_aclk_gscl_fimc",
+ mout_aclk_gscl_fimc_p,
+ MUX_SEL_TOP_GSCL, 20, 1),
+};
+
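+/* DIV(id, name, parent_name, reg_offset, shift, width): clock divider fields. */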
+static struct samsung_div_clock top_div_clks[] __initdata = {
+ DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333",
+ DIV_TOP_G2D_MFC, 0, 3),
+ DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333",
+ DIV_TOP_G2D_MFC, 4, 3),
+
+ DIV(TOP_DOUT_ACLK_GSCL_333, "dout_aclk_gscl_333", "mout_aclk_gscl_333",
+ DIV_TOP_GSCL_ISP0, 0, 3),
+ DIV(TOP_DOUT_ACLK_GSCL_400, "dout_aclk_gscl_400", "mout_aclk_gscl_400",
+ DIV_TOP_GSCL_ISP0, 4, 3),
+ DIV(TOP_DOUT_ACLK_GSCL_FIMC, "dout_aclk_gscl_fimc",
+ "mout_aclk_gscl_fimc", DIV_TOP_GSCL_ISP0, 8, 3),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR0_A, "dout_sclk_isp1_sensor0_a",
+ "mout_sclk_isp1_sensor0", DIV_TOP_GSCL_ISP0, 16, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR1_A, "dout_sclk_isp1_sensor1_a",
+ "mout_sclk_isp1_sensor1", DIV_TOP_GSCL_ISP0, 20, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR2_A, "dout_sclk_isp1_sensor2_a",
+ "mout_sclk_isp1_sensor2", DIV_TOP_GSCL_ISP0, 24, 4),
+
+ DIV(TOP_DOUT_ACLK_ISP1_266, "dout_aclk_isp1_266", "mout_aclk_isp1_266",
+ DIV_TOP_ISP10, 0, 3),
+ DIV(TOP_DOUT_ACLK_ISP1_400, "dout_aclk_isp1_400", "mout_aclk_isp1_400",
+ DIV_TOP_ISP10, 4, 3),
+ DIV(TOP_DOUT_SCLK_ISP1_SPI0_A, "dout_sclk_isp1_spi0_a",
+ "mout_sclk_isp1_spi0", DIV_TOP_ISP10, 12, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SPI0_B, "dout_sclk_isp1_spi0_b",
+ "dout_sclk_isp1_spi0_a", DIV_TOP_ISP10, 16, 8),
+
+ DIV(TOP_DOUT_SCLK_ISP1_SPI1_A, "dout_sclk_isp1_spi1_a",
+ "mout_sclk_isp1_spi1", DIV_TOP_ISP11, 0, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SPI1_B, "dout_sclk_isp1_spi1_b",
+ "dout_sclk_isp1_spi1_a", DIV_TOP_ISP11, 4, 8),
+ DIV(TOP_DOUT_SCLK_ISP1_UART, "dout_sclk_isp1_uart",
+ "mout_sclk_isp1_uart", DIV_TOP_ISP11, 12, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR0_B, "dout_sclk_isp1_sensor0_b",
+ "dout_sclk_isp1_sensor0_a", DIV_TOP_ISP11, 16, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR1_B, "dout_sclk_isp1_sensor1_b",
+ "dout_sclk_isp1_sensor1_a", DIV_TOP_ISP11, 20, 4),
+ DIV(TOP_DOUT_SCLK_ISP1_SENSOR2_B, "dout_sclk_isp1_sensor2_b",
+ "dout_sclk_isp1_sensor2_a", DIV_TOP_ISP11, 24, 4),
+
+ DIV(TOP_DOUTTOP__SCLK_HPM_TARGETCLK, "dout_sclk_hpm_targetclk",
+ "mout_bustop_pll_user", DIV_TOP_HPM, 0, 3),
+
+ DIV(TOP_DOUT_ACLK_DISP_333, "dout_aclk_disp_333", "mout_aclk_disp_333",
+ DIV_TOP_DISP, 0, 3),
+ DIV(TOP_DOUT_ACLK_DISP_222, "dout_aclk_disp_222", "mout_aclk_disp_222",
+ DIV_TOP_DISP, 4, 3),
+ DIV(TOP_DOUT_SCLK_DISP_PIXEL, "dout_sclk_disp_pixel",
+ "mout_sclk_disp_pixel", DIV_TOP_DISP, 8, 3),
+
+ DIV(TOP_DOUT_ACLK_BUS1_400, "dout_aclk_bus1_400",
+ "mout_bus1_bustop_400", DIV_TOP_BUS, 0, 3),
+ DIV(TOP_DOUT_ACLK_BUS1_100, "dout_aclk_bus1_100",
+ "mout_bus1_bustop_100", DIV_TOP_BUS, 4, 4),
+ DIV(TOP_DOUT_ACLK_BUS2_400, "dout_aclk_bus2_400",
+ "mout_bus2_bustop_400", DIV_TOP_BUS, 8, 3),
+ DIV(TOP_DOUT_ACLK_BUS2_100, "dout_aclk_bus2_100",
+ "mout_bus2_bustop_100", DIV_TOP_BUS, 12, 4),
+ DIV(TOP_DOUT_ACLK_BUS3_400, "dout_aclk_bus3_400",
+ "mout_bus3_bustop_400", DIV_TOP_BUS, 16, 3),
+ DIV(TOP_DOUT_ACLK_BUS3_100, "dout_aclk_bus3_100",
+ "mout_bus3_bustop_100", DIV_TOP_BUS, 20, 4),
+ DIV(TOP_DOUT_ACLK_BUS4_400, "dout_aclk_bus4_400",
+ "mout_bus4_bustop_400", DIV_TOP_BUS, 24, 3),
+ DIV(TOP_DOUT_ACLK_BUS4_100, "dout_aclk_bus4_100",
+ "mout_bus4_bustop_100", DIV_TOP_BUS, 28, 4),
+
+ DIV(TOP_DOUT_SCLK_PERI_SPI0_A, "dout_sclk_peri_spi0_a",
+ "mout_sclk_peri_spi0_clk", DIV_TOP_PERI0, 4, 4),
+ DIV(TOP_DOUT_SCLK_PERI_SPI0_B, "dout_sclk_peri_spi0_b",
+ "dout_sclk_peri_spi0_a", DIV_TOP_PERI0, 8, 8),
+ DIV(TOP_DOUT_SCLK_PERI_SPI1_A, "dout_sclk_peri_spi1_a",
+ "mout_sclk_peri_spi1_clk", DIV_TOP_PERI0, 16, 4),
+ DIV(TOP_DOUT_SCLK_PERI_SPI1_B, "dout_sclk_peri_spi1_b",
+ "dout_sclk_peri_spi1_a", DIV_TOP_PERI0, 20, 8),
+
+ DIV(TOP_DOUT_SCLK_PERI_SPI2_A, "dout_sclk_peri_spi2_a",
+ "mout_sclk_peri_spi2_clk", DIV_TOP_PERI1, 0, 4),
+ DIV(TOP_DOUT_SCLK_PERI_SPI2_B, "dout_sclk_peri_spi2_b",
+ "dout_sclk_peri_spi2_a", DIV_TOP_PERI1, 4, 8),
+ DIV(TOP_DOUT_SCLK_PERI_UART1, "dout_sclk_peri_uart1",
+ "mout_sclk_peri_uart1_uclk", DIV_TOP_PERI1, 16, 4),
+ DIV(TOP_DOUT_SCLK_PERI_UART2, "dout_sclk_peri_uart2",
+ "mout_sclk_peri_uart2_uclk", DIV_TOP_PERI1, 20, 4),
+ DIV(TOP_DOUT_SCLK_PERI_UART0, "dout_sclk_peri_uart0",
+ "mout_sclk_peri_uart0_uclk", DIV_TOP_PERI1, 24, 4),
+
+ DIV(TOP_DOUT_ACLK_PERI_66, "dout_aclk_peri_66", "mout_bustop_pll_user",
+ DIV_TOP_PERI2, 20, 4),
+ DIV(TOP_DOUT_ACLK_PERI_AUD, "dout_aclk_peri_aud",
+ "mout_audtop_pll_user", DIV_TOP_PERI2, 24, 3),
+
+ DIV(TOP_DOUT_ACLK_FSYS_200, "dout_aclk_fsys_200",
+ "mout_bustop_pll_user", DIV_TOP_FSYS0, 0, 3),
+ DIV(TOP_DOUT_SCLK_FSYS_USBDRD30_SUSPEND_CLK,
+ "dout_sclk_fsys_usbdrd30_suspend_clk",
+ "mout_sclk_fsys_usb", DIV_TOP_FSYS0, 4, 4),
+ DIV(TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_A, "dout_sclk_fsys_mmc0_sdclkin_a",
+ "mout_sclk_fsys_mmc0_sdclkin_b",
+ DIV_TOP_FSYS0, 12, 4),
+ DIV(TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_B, "dout_sclk_fsys_mmc0_sdclkin_b",
+ "dout_sclk_fsys_mmc0_sdclkin_a",
+ DIV_TOP_FSYS0, 16, 8),
+
+ DIV(TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_A, "dout_sclk_fsys_mmc1_sdclkin_a",
+ "mout_sclk_fsys_mmc1_sdclkin_b",
+ DIV_TOP_FSYS1, 0, 4),
+ DIV(TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_B, "dout_sclk_fsys_mmc1_sdclkin_b",
+ "dout_sclk_fsys_mmc1_sdclkin_a",
+ DIV_TOP_FSYS1, 4, 8),
+ DIV(TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_A, "dout_sclk_fsys_mmc2_sdclkin_a",
+ "mout_sclk_fsys_mmc2_sdclkin_b",
+ DIV_TOP_FSYS1, 12, 4),
+ DIV(TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_B, "dout_sclk_fsys_mmc2_sdclkin_b",
+ "dout_sclk_fsys_mmc2_sdclkin_a",
+ DIV_TOP_FSYS1, 16, 8),
+};
+
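+/* GATE(id, name, parent_name, reg_offset, bit, flags, gate_flags): gate bits. */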
+static struct samsung_gate_clock top_gate_clks[] __initdata = {
+ GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin",
+ "dout_sclk_fsys_mmc0_sdclkin_b",
+ EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(TOP_SCLK_MMC1, "sclk_fsys_mmc1_sdclkin",
+ "dout_sclk_fsys_mmc1_sdclkin_b",
+ EN_SCLK_TOP, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(TOP_SCLK_MMC2, "sclk_fsys_mmc2_sdclkin",
+ "dout_sclk_fsys_mmc2_sdclkin_b",
+ EN_SCLK_TOP, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(TOP_SCLK_FIMD1, "sclk_disp_pixel", "dout_sclk_disp_pixel",
+ EN_ACLK_TOP, 10, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0),
+};
+
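+/* PLL(type, id, name, parent_name, lock_reg, con_reg, rate_table): PLLs. */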
+static struct samsung_pll_clock top_pll_clks[] __initdata = {
+ PLL(pll_2550xx, TOP_FOUT_DISP_PLL, "fout_disp_pll", "fin_pll",
+ DISP_PLL_LOCK, DISP_PLL_CON0,
+ pll2550_24mhz_tbl),
+ PLL(pll_2650xx, TOP_FOUT_AUD_PLL, "fout_aud_pll", "fin_pll",
+ AUD_PLL_LOCK, AUD_PLL_CON0,
+ pll2650_24mhz_tbl),
+};
+
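+/*
+ * Collect the CMU_TOP clock tables into a single description and hand it
+ * to the common exynos5260 registration helper.
+ */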
+static void __init exynos5260_clk_top_init(struct device_node *np)
+{
+ struct exynos5260_cmu_info cmu = {0};
+
+ cmu.pll_clks = top_pll_clks;
+ cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks);
+ cmu.mux_clks = top_mux_clks;
+ cmu.nr_mux_clks = ARRAY_SIZE(top_mux_clks);
+ cmu.div_clks = top_div_clks;
+ cmu.nr_div_clks = ARRAY_SIZE(top_div_clks);
+ cmu.gate_clks = top_gate_clks;
+ cmu.nr_gate_clks = ARRAY_SIZE(top_gate_clks);
+ cmu.fixed_clks = fixed_rate_clks;
+ cmu.nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks);
+ cmu.nr_clk_ids = TOP_NR_CLK;
+ cmu.clk_regs = top_clk_regs;
+ cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
+
+ exynos5260_cmu_register_one(np, &cmu);
+}
+
+CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
+ exynos5260_clk_top_init);
diff --git a/drivers/clk/samsung/clk-exynos5260.h b/drivers/clk/samsung/clk-exynos5260.h
new file mode 100644
index 000000000000..d739716d6ea1
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5260.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Rahul Sharma <rahul.sharma@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5260 SoC.
+ */
+
+#ifndef __CLK_EXYNOS5260_H
+#define __CLK_EXYNOS5260_H
+
+/*
+ * Registers for CMU_AUD
+ */
+#define MUX_SEL_AUD 0x0200
+#define MUX_ENABLE_AUD 0x0300
+#define MUX_STAT_AUD 0x0400
+#define MUX_IGNORE_AUD 0x0500
+#define DIV_AUD0 0x0600
+#define DIV_AUD1 0x0604
+#define DIV_STAT_AUD0 0x0700
+#define DIV_STAT_AUD1 0x0704
+#define EN_ACLK_AUD 0x0800
+#define EN_PCLK_AUD 0x0900
+#define EN_SCLK_AUD 0x0a00
+#define EN_IP_AUD 0x0b00
+
+/*
+ * Registers for CMU_DISP
+ */
+#define MUX_SEL_DISP0 0x0200
+#define MUX_SEL_DISP1 0x0204
+#define MUX_SEL_DISP2 0x0208
+#define MUX_SEL_DISP3 0x020C
+#define MUX_SEL_DISP4 0x0210
+#define MUX_ENABLE_DISP0 0x0300
+#define MUX_ENABLE_DISP1 0x0304
+#define MUX_ENABLE_DISP2 0x0308
+#define MUX_ENABLE_DISP3 0x030c
+#define MUX_ENABLE_DISP4 0x0310
+#define MUX_STAT_DISP0 0x0400
+#define MUX_STAT_DISP1 0x0404
+#define MUX_STAT_DISP2 0x0408
+#define MUX_STAT_DISP3 0x040c
+#define MUX_STAT_DISP4 0x0410
+#define MUX_IGNORE_DISP0 0x0500
+#define MUX_IGNORE_DISP1 0x0504
+#define MUX_IGNORE_DISP2 0x0508
+#define MUX_IGNORE_DISP3 0x050c
+#define MUX_IGNORE_DISP4 0x0510
+#define DIV_DISP 0x0600
+#define DIV_STAT_DISP 0x0700
+#define EN_ACLK_DISP 0x0800
+#define EN_PCLK_DISP 0x0900
+#define EN_SCLK_DISP0 0x0a00
+#define EN_SCLK_DISP1 0x0a04
+#define EN_IP_DISP 0x0b00
+#define EN_IP_DISP_BUS 0x0b04
+
+/*
+ * Registers for CMU_EGL
+ */
+#define EGL_PLL_LOCK 0x0000
+#define EGL_DPLL_LOCK 0x0004
+#define EGL_PLL_CON0 0x0100
+#define EGL_PLL_CON1 0x0104
+#define EGL_PLL_FREQ_DET 0x010c
+#define EGL_DPLL_CON0 0x0110
+#define EGL_DPLL_CON1 0x0114
+#define EGL_DPLL_FREQ_DET 0x011c
+#define MUX_SEL_EGL 0x0200
+#define MUX_ENABLE_EGL 0x0300
+#define MUX_STAT_EGL 0x0400
+#define DIV_EGL 0x0600
+#define DIV_EGL_PLL_FDET 0x0604
+#define DIV_STAT_EGL 0x0700
+#define DIV_STAT_EGL_PLL_FDET 0x0704
+#define EN_ACLK_EGL 0x0800
+#define EN_PCLK_EGL 0x0900
+#define EN_SCLK_EGL 0x0a00
+#define EN_IP_EGL 0x0b00
+#define CLKOUT_CMU_EGL 0x0c00
+#define CLKOUT_CMU_EGL_DIV_STAT 0x0c04
+#define ARMCLK_STOPCTRL 0x1000
+#define EAGLE_EMA_CTRL 0x1008
+#define EAGLE_EMA_STATUS 0x100c
+#define PWR_CTRL 0x1020
+#define PWR_CTRL2 0x1024
+#define CLKSTOP_CTRL 0x1028
+#define INTR_SPREAD_EN 0x1080
+#define INTR_SPREAD_USE_STANDBYWFI 0x1084
+#define INTR_SPREAD_BLOCKING_DURATION 0x1088
+#define CMU_EGL_SPARE0 0x2000
+#define CMU_EGL_SPARE1 0x2004
+#define CMU_EGL_SPARE2 0x2008
+#define CMU_EGL_SPARE3 0x200c
+#define CMU_EGL_SPARE4 0x2010
+
+/*
+ * Registers for CMU_FSYS
+ */
+
+#define MUX_SEL_FSYS0 0x0200
+#define MUX_SEL_FSYS1 0x0204
+#define MUX_ENABLE_FSYS0 0x0300
+#define MUX_ENABLE_FSYS1 0x0304
+#define MUX_STAT_FSYS0 0x0400
+#define MUX_STAT_FSYS1 0x0404
+#define MUX_IGNORE_FSYS0 0x0500
+#define MUX_IGNORE_FSYS1 0x0504
+#define EN_ACLK_FSYS 0x0800
+#define EN_ACLK_FSYS_SECURE_RTIC 0x0804
+#define EN_ACLK_FSYS_SECURE_SMMU_RTIC 0x0808
+#define EN_PCLK_FSYS 0x0900
+#define EN_SCLK_FSYS 0x0a00
+#define EN_IP_FSYS 0x0b00
+#define EN_IP_FSYS_SECURE_RTIC 0x0b04
+#define EN_IP_FSYS_SECURE_SMMU_RTIC 0x0b08
+
+/*
+ * Registers for CMU_G2D
+ */
+
+#define MUX_SEL_G2D 0x0200
+#define MUX_ENABLE_G2D 0x0300
+#define MUX_STAT_G2D 0x0400
+#define DIV_G2D 0x0600
+#define DIV_STAT_G2D 0x0700
+#define EN_ACLK_G2D 0x0800
+#define EN_ACLK_G2D_SECURE_SSS 0x0804
+#define EN_ACLK_G2D_SECURE_SLIM_SSS 0x0808
+#define EN_ACLK_G2D_SECURE_SMMU_SLIM_SSS 0x080c
+#define EN_ACLK_G2D_SECURE_SMMU_SSS 0x0810
+#define EN_ACLK_G2D_SECURE_SMMU_MDMA 0x0814
+#define EN_ACLK_G2D_SECURE_SMMU_G2D 0x0818
+#define EN_PCLK_G2D 0x0900
+#define EN_PCLK_G2D_SECURE_SMMU_SLIM_SSS 0x0904
+#define EN_PCLK_G2D_SECURE_SMMU_SSS 0x0908
+#define EN_PCLK_G2D_SECURE_SMMU_MDMA 0x090c
+#define EN_PCLK_G2D_SECURE_SMMU_G2D 0x0910
+#define EN_IP_G2D 0x0b00
+#define EN_IP_G2D_SECURE_SSS 0x0b04
+#define EN_IP_G2D_SECURE_SLIM_SSS 0x0b08
+#define EN_IP_G2D_SECURE_SMMU_SLIM_SSS 0x0b0c
+#define EN_IP_G2D_SECURE_SMMU_SSS 0x0b10
+#define EN_IP_G2D_SECURE_SMMU_MDMA 0x0b14
+#define EN_IP_G2D_SECURE_SMMU_G2D 0x0b18
+
+/*
+ * Registers for CMU_G3D
+ */
+
+#define G3D_PLL_LOCK 0x0000
+#define G3D_PLL_CON0 0x0100
+#define G3D_PLL_CON1 0x0104
+#define G3D_PLL_FDET 0x010c
+#define MUX_SEL_G3D 0x0200
+#define MUX_EN_G3D 0x0300
+#define MUX_STAT_G3D 0x0400
+#define MUX_IGNORE_G3D 0x0500
+#define DIV_G3D 0x0600
+#define DIV_G3D_PLL_FDET 0x0604
+#define DIV_STAT_G3D 0x0700
+#define DIV_STAT_G3D_PLL_FDET 0x0704
+#define EN_ACLK_G3D 0x0800
+#define EN_PCLK_G3D 0x0900
+#define EN_SCLK_G3D 0x0a00
+#define EN_IP_G3D 0x0b00
+#define CLKOUT_CMU_G3D 0x0c00
+#define CLKOUT_CMU_G3D_DIV_STAT 0x0c04
+#define G3DCLK_STOPCTRL 0x1000
+#define G3D_EMA_CTRL 0x1008
+#define G3D_EMA_STATUS 0x100c
+
+/*
+ * Registers for CMU_GSCL
+ */
+
+#define MUX_SEL_GSCL 0x0200
+#define MUX_EN_GSCL 0x0300
+#define MUX_STAT_GSCL 0x0400
+#define MUX_IGNORE_GSCL 0x0500
+#define DIV_GSCL 0x0600
+#define DIV_STAT_GSCL 0x0700
+#define EN_ACLK_GSCL 0x0800
+#define EN_ACLK_GSCL_FIMC 0x0804
+#define EN_ACLK_GSCL_SECURE_SMMU_GSCL0 0x0808
+#define EN_ACLK_GSCL_SECURE_SMMU_GSCL1 0x080c
+#define EN_ACLK_GSCL_SECURE_SMMU_MSCL0 0x0810
+#define EN_ACLK_GSCL_SECURE_SMMU_MSCL1 0x0814
+#define EN_PCLK_GSCL 0x0900
+#define EN_PCLK_GSCL_FIMC 0x0904
+#define EN_PCLK_GSCL_SECURE_SMMU_GSCL0 0x0908
+#define EN_PCLK_GSCL_SECURE_SMMU_GSCL1 0x090c
+#define EN_PCLK_GSCL_SECURE_SMMU_MSCL0 0x0910
+#define EN_PCLK_GSCL_SECURE_SMMU_MSCL1 0x0914
+#define EN_SCLK_GSCL 0x0a00
+#define EN_SCLK_GSCL_FIMC 0x0a04
+#define EN_IP_GSCL 0x0b00
+#define EN_IP_GSCL_FIMC 0x0b04
+#define EN_IP_GSCL_SECURE_SMMU_GSCL0 0x0b08
+#define EN_IP_GSCL_SECURE_SMMU_GSCL1 0x0b0c
+#define EN_IP_GSCL_SECURE_SMMU_MSCL0 0x0b10
+#define EN_IP_GSCL_SECURE_SMMU_MSCL1 0x0b14
+
+/*
+ * Registers for CMU_ISP
+ */
+#define MUX_SEL_ISP0 0x0200
+#define MUX_SEL_ISP1 0x0204
+#define MUX_ENABLE_ISP0 0x0300
+#define MUX_ENABLE_ISP1 0x0304
+#define MUX_STAT_ISP0 0x0400
+#define MUX_STAT_ISP1 0x0404
+#define MUX_IGNORE_ISP0 0x0500
+#define MUX_IGNORE_ISP1 0x0504
+#define DIV_ISP 0x0600
+#define DIV_STAT_ISP 0x0700
+#define EN_ACLK_ISP0 0x0800
+#define EN_ACLK_ISP1 0x0804
+#define EN_PCLK_ISP0 0x0900
+#define EN_PCLK_ISP1 0x0904
+#define EN_SCLK_ISP 0x0a00
+#define EN_IP_ISP0 0x0b00
+#define EN_IP_ISP1 0x0b04
+
+/*
+ * Registers for CMU_KFC
+ */
+#define KFC_PLL_LOCK 0x0000
+#define KFC_PLL_CON0 0x0100
+#define KFC_PLL_CON1 0x0104
+#define KFC_PLL_FDET 0x010c
+#define MUX_SEL_KFC0 0x0200
+#define MUX_SEL_KFC2 0x0208
+#define MUX_ENABLE_KFC0 0x0300
+#define MUX_ENABLE_KFC2 0x0308
+#define MUX_STAT_KFC0 0x0400
+#define MUX_STAT_KFC2 0x0408
+#define DIV_KFC 0x0600
+#define DIV_KFC_PLL_FDET 0x0604
+#define DIV_STAT_KFC 0x0700
+#define DIV_STAT_KFC_PLL_FDET 0x0704
+#define EN_ACLK_KFC 0x0800
+#define EN_PCLK_KFC 0x0900
+#define EN_SCLK_KFC 0x0a00
+#define EN_IP_KFC 0x0b00
+#define CLKOUT_CMU_KFC 0x0c00
+#define CLKOUT_CMU_KFC_DIV_STAT 0x0c04
+#define ARMCLK_STOPCTRL_KFC 0x1000
+#define ARM_EMA_CTRL 0x1008
+#define ARM_EMA_STATUS 0x100c
+#define PWR_CTRL_KFC 0x1020
+#define PWR_CTRL2_KFC 0x1024
+#define CLKSTOP_CTRL_KFC 0x1028
+#define INTR_SPREAD_ENABLE_KFC 0x1080
+#define INTR_SPREAD_USE_STANDBYWFI_KFC 0x1084
+#define INTR_SPREAD_BLOCKING_DURATION_KFC 0x1088
+#define CMU_KFC_SPARE0 0x2000
+#define CMU_KFC_SPARE1 0x2004
+#define CMU_KFC_SPARE2 0x2008
+#define CMU_KFC_SPARE3 0x200c
+#define CMU_KFC_SPARE4 0x2010
+
+/*
+ * Registers for CMU_MFC
+ */
+#define MUX_SEL_MFC 0x0200
+#define MUX_ENABLE_MFC 0x0300
+#define MUX_STAT_MFC 0x0400
+#define DIV_MFC 0x0600
+#define DIV_STAT_MFC 0x0700
+#define EN_ACLK_MFC 0x0800
+#define EN_ACLK_SECURE_SMMU2_MFC 0x0804
+#define EN_PCLK_MFC 0x0900
+#define EN_PCLK_SECURE_SMMU2_MFC 0x0904
+#define EN_IP_MFC 0x0b00
+#define EN_IP_MFC_SECURE_SMMU2_MFC 0x0b04
+
+/*
+ * Registers for CMU_MIF
+ */
+#define MEM_PLL_LOCK 0x0000
+#define BUS_PLL_LOCK 0x0004
+#define MEDIA_PLL_LOCK 0x0008
+#define MEM_PLL_CON0 0x0100
+#define MEM_PLL_CON1 0x0104
+#define MEM_PLL_FDET 0x010c
+#define BUS_PLL_CON0 0x0110
+#define BUS_PLL_CON1 0x0114
+#define BUS_PLL_FDET 0x011c
+#define MEDIA_PLL_CON0 0x0120
+#define MEDIA_PLL_CON1 0x0124
+#define MEDIA_PLL_FDET 0x012c
+#define MUX_SEL_MIF 0x0200
+#define MUX_ENABLE_MIF 0x0300
+#define MUX_STAT_MIF 0x0400
+#define MUX_IGNORE_MIF 0x0500
+#define DIV_MIF 0x0600
+#define DIV_MIF_PLL_FDET 0x0604
+#define DIV_STAT_MIF 0x0700
+#define DIV_STAT_MIF_PLL_FDET 0x0704
+#define EN_ACLK_MIF 0x0800
+#define EN_ACLK_MIF_SECURE_DREX1_TZ 0x0804
+#define EN_ACLK_MIF_SECURE_DREX0_TZ 0x0808
+#define EN_ACLK_MIF_SECURE_INTMEM 0x080c
+#define EN_PCLK_MIF 0x0900
+#define EN_PCLK_MIF_SECURE_MONOCNT 0x0904
+#define EN_PCLK_MIF_SECURE_RTC_APBIF 0x0908
+#define EN_PCLK_MIF_SECURE_DREX1_TZ 0x090c
+#define EN_PCLK_MIF_SECURE_DREX0_TZ 0x0910
+#define EN_SCLK_MIF 0x0a00
+#define EN_IP_MIF 0x0b00
+#define EN_IP_MIF_SECURE_MONOCNT 0x0b04
+#define EN_IP_MIF_SECURE_RTC_APBIF 0x0b08
+#define EN_IP_MIF_SECURE_DREX1_TZ 0x0b0c
+#define EN_IP_MIF_SECURE_DREX0_TZ 0x0b10
+#define EN_IP_MIF_SECURE_INTEMEM 0x0b14
+#define CLKOUT_CMU_MIF_DIV_STAT 0x0c04
+#define DREX_FREQ_CTRL 0x1000
+#define PAUSE 0x1004
+#define DDRPHY_LOCK_CTRL 0x1008
+#define CLKOUT_CMU_MIF 0xcb00
+
+/*
+ * Registers for CMU_PERI
+ */
+#define MUX_SEL_PERI 0x0200
+#define MUX_SEL_PERI1 0x0204
+#define MUX_ENABLE_PERI 0x0300
+#define MUX_ENABLE_PERI1 0x0304
+#define MUX_STAT_PERI 0x0400
+#define MUX_STAT_PERI1 0x0404
+#define MUX_IGNORE_PERI 0x0500
+#define MUX_IGNORE_PERI1 0x0504
+#define DIV_PERI 0x0600
+#define DIV_STAT_PERI 0x0700
+#define EN_PCLK_PERI0 0x0800
+#define EN_PCLK_PERI1 0x0804
+#define EN_PCLK_PERI2 0x0808
+#define EN_PCLK_PERI3 0x080c
+#define EN_PCLK_PERI_SECURE_CHIPID 0x0810
+#define EN_PCLK_PERI_SECURE_PROVKEY0 0x0814
+#define EN_PCLK_PERI_SECURE_PROVKEY1 0x0818
+#define EN_PCLK_PERI_SECURE_SECKEY 0x081c
+#define EN_PCLK_PERI_SECURE_ANTIRBKCNT 0x0820
+#define EN_PCLK_PERI_SECURE_TOP_RTC 0x0824
+#define EN_PCLK_PERI_SECURE_TZPC 0x0828
+#define EN_SCLK_PERI 0x0a00
+#define EN_SCLK_PERI_SECURE_TOP_RTC 0x0a04
+#define EN_IP_PERI0 0x0b00
+#define EN_IP_PERI1 0x0b04
+#define EN_IP_PERI2 0x0b08
+#define EN_IP_PERI_SECURE_CHIPID 0x0b0c
+#define EN_IP_PERI_SECURE_PROVKEY0 0x0b10
+#define EN_IP_PERI_SECURE_PROVKEY1 0x0b14
+#define EN_IP_PERI_SECURE_SECKEY 0x0b18
+#define EN_IP_PERI_SECURE_ANTIRBKCNT 0x0b1c
+#define EN_IP_PERI_SECURE_TOP_RTC 0x0b20
+#define EN_IP_PERI_SECURE_TZPC 0x0b24
+
+/*
+ * Registers for CMU_TOP
+ */
+#define DISP_PLL_LOCK 0x0000
+#define AUD_PLL_LOCK 0x0004
+#define DISP_PLL_CON0 0x0100
+#define DISP_PLL_CON1 0x0104
+#define DISP_PLL_FDET 0x0108
+#define AUD_PLL_CON0 0x0110
+#define AUD_PLL_CON1 0x0114
+#define AUD_PLL_CON2 0x0118
+#define AUD_PLL_FDET 0x011c
+#define MUX_SEL_TOP_PLL0 0x0200
+#define MUX_SEL_TOP_MFC 0x0204
+#define MUX_SEL_TOP_G2D 0x0208
+#define MUX_SEL_TOP_GSCL 0x020c
+#define MUX_SEL_TOP_ISP10 0x0214
+#define MUX_SEL_TOP_ISP11 0x0218
+#define MUX_SEL_TOP_DISP0 0x021c
+#define MUX_SEL_TOP_DISP1 0x0220
+#define MUX_SEL_TOP_BUS 0x0224
+#define MUX_SEL_TOP_PERI0 0x0228
+#define MUX_SEL_TOP_PERI1 0x022c
+#define MUX_SEL_TOP_FSYS 0x0230
+#define MUX_ENABLE_TOP_PLL0 0x0300
+#define MUX_ENABLE_TOP_MFC 0x0304
+#define MUX_ENABLE_TOP_G2D 0x0308
+#define MUX_ENABLE_TOP_GSCL 0x030c
+#define MUX_ENABLE_TOP_ISP10 0x0314
+#define MUX_ENABLE_TOP_ISP11 0x0318
+#define MUX_ENABLE_TOP_DISP0 0x031c
+#define MUX_ENABLE_TOP_DISP1 0x0320
+#define MUX_ENABLE_TOP_BUS 0x0324
+#define MUX_ENABLE_TOP_PERI0 0x0328
+#define MUX_ENABLE_TOP_PERI1 0x032c
+#define MUX_ENABLE_TOP_FSYS 0x0330
+#define MUX_STAT_TOP_PLL0 0x0400
+#define MUX_STAT_TOP_MFC 0x0404
+#define MUX_STAT_TOP_G2D 0x0408
+#define MUX_STAT_TOP_GSCL 0x040c
+#define MUX_STAT_TOP_ISP10 0x0414
+#define MUX_STAT_TOP_ISP11 0x0418
+#define MUX_STAT_TOP_DISP0 0x041c
+#define MUX_STAT_TOP_DISP1 0x0420
+#define MUX_STAT_TOP_BUS 0x0424
+#define MUX_STAT_TOP_PERI0 0x0428
+#define MUX_STAT_TOP_PERI1 0x042c
+#define MUX_STAT_TOP_FSYS 0x0430
+#define MUX_IGNORE_TOP_PLL0 0x0500
+#define MUX_IGNORE_TOP_MFC 0x0504
+#define MUX_IGNORE_TOP_G2D 0x0508
+#define MUX_IGNORE_TOP_GSCL 0x050c
+#define MUX_IGNORE_TOP_ISP10 0x0514
+#define MUX_IGNORE_TOP_ISP11 0x0518
+#define MUX_IGNORE_TOP_DISP0 0x051c
+#define MUX_IGNORE_TOP_DISP1 0x0520
+#define MUX_IGNORE_TOP_BUS 0x0524
+#define MUX_IGNORE_TOP_PERI0 0x0528
+#define MUX_IGNORE_TOP_PERI1 0x052c
+#define MUX_IGNORE_TOP_FSYS 0x0530
+#define DIV_TOP_G2D_MFC 0x0600
+#define DIV_TOP_GSCL_ISP0 0x0604
+#define DIV_TOP_ISP10 0x0608
+#define DIV_TOP_ISP11 0x060c
+#define DIV_TOP_DISP 0x0610
+#define DIV_TOP_BUS 0x0614
+#define DIV_TOP_PERI0 0x0618
+#define DIV_TOP_PERI1 0x061c
+#define DIV_TOP_PERI2 0x0620
+#define DIV_TOP_FSYS0 0x0624
+#define DIV_TOP_FSYS1 0x0628
+#define DIV_TOP_HPM 0x062c
+#define DIV_TOP_PLL_FDET 0x0630
+#define DIV_STAT_TOP_G2D_MFC 0x0700
+#define DIV_STAT_TOP_GSCL_ISP0 0x0704
+#define DIV_STAT_TOP_ISP10 0x0708
+#define DIV_STAT_TOP_ISP11 0x070c
+#define DIV_STAT_TOP_DISP 0x0710
+#define DIV_STAT_TOP_BUS 0x0714
+#define DIV_STAT_TOP_PERI0 0x0718
+#define DIV_STAT_TOP_PERI1 0x071c
+#define DIV_STAT_TOP_PERI2 0x0720
+#define DIV_STAT_TOP_FSYS0 0x0724
+#define DIV_STAT_TOP_FSYS1 0x0728
+#define DIV_STAT_TOP_HPM 0x072c
+#define DIV_STAT_TOP_PLL_FDET 0x0730
+#define EN_ACLK_TOP 0x0800
+#define EN_SCLK_TOP 0x0a00
+#define EN_IP_TOP 0x0b00
+#define CLKOUT_CMU_TOP 0x0c00
+#define CLKOUT_CMU_TOP_DIV_STAT 0x0c04
+
+#endif /* __CLK_EXYNOS5260_H */
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
new file mode 100644
index 000000000000..c9505ab9ee70
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Tarek Dakhran <t.dakhran@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5410 SoC.
+ */
+
+#include <dt-bindings/clock/exynos5410.h>
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+#define APLL_LOCK 0x0
+#define APLL_CON0 0x100
+#define CPLL_LOCK 0x10020
+#define CPLL_CON0 0x10120
+#define MPLL_LOCK 0x4000
+#define MPLL_CON0 0x4100
+#define BPLL_LOCK 0x20010
+#define BPLL_CON0 0x20110
+#define KPLL_LOCK 0x28000
+#define KPLL_CON0 0x28100
+
+#define SRC_CPU 0x200
+#define DIV_CPU0 0x500
+#define SRC_CPERI1 0x4204
+#define DIV_TOP0 0x10510
+#define DIV_TOP1 0x10514
+#define DIV_FSYS1 0x1054c
+#define DIV_FSYS2 0x10550
+#define DIV_PERIC0 0x10558
+#define SRC_TOP0 0x10210
+#define SRC_TOP1 0x10214
+#define SRC_TOP2 0x10218
+#define SRC_FSYS 0x10244
+#define SRC_PERIC0 0x10250
+#define SRC_MASK_FSYS 0x10340
+#define SRC_MASK_PERIC0 0x10350
+#define GATE_BUS_FSYS0 0x10740
+#define GATE_IP_FSYS 0x10944
+#define GATE_IP_PERIC 0x10950
+#define GATE_IP_PERIS 0x10960
+#define SRC_CDREX 0x20200
+#define SRC_KFC 0x28200
+#define DIV_KFC0 0x28500
+
+/* list of PLLs */
+enum exynos5410_plls {
+ apll, cpll, mpll,
+ bpll, kpll,
+ nr_plls /* number of PLLs */
+};
+
+/* list of all parent clocks */
+PNAME(apll_p) = { "fin_pll", "fout_apll", };
+PNAME(bpll_p) = { "fin_pll", "fout_bpll", };
+PNAME(cpll_p) = { "fin_pll", "fout_cpll" };
+PNAME(mpll_p) = { "fin_pll", "fout_mpll", };
+PNAME(kpll_p) = { "fin_pll", "fout_kpll", };
+
+PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
+PNAME(mout_kfc_p) = { "mout_kpll", "sclk_mpll", };
+
+PNAME(mpll_user_p) = { "fin_pll", "sclk_mpll", };
+PNAME(bpll_user_p) = { "fin_pll", "sclk_bpll", };
+PNAME(mpll_bpll_p) = { "sclk_mpll_muxed", "sclk_bpll_muxed", };
+
+PNAME(group2_p) = { "fin_pll", "fin_pll", "none", "none",
+ "none", "none", "sclk_mpll_bpll",
+ "none", "none", "sclk_cpll" };
+
+static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = {
+ MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1),
+ MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+
+ MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
+ MUX(0, "mout_kfc", mout_kfc_p, SRC_KFC, 16, 1),
+
+ MUX(0, "sclk_mpll", mpll_p, SRC_CPERI1, 8, 1),
+ MUX(0, "sclk_mpll_muxed", mpll_user_p, SRC_TOP2, 20, 1),
+
+ MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
+ MUX(0, "sclk_bpll_muxed", bpll_user_p, SRC_TOP2, 24, 1),
+
+ MUX(0, "sclk_cpll", cpll_p, SRC_TOP2, 8, 1),
+
+ MUX(0, "sclk_mpll_bpll", mpll_bpll_p, SRC_TOP1, 20, 1),
+
+ MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 8, 4),
+
+ MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 0, 4),
+ MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 4, 4),
+ MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 8, 4),
+
+ MUX(0, "mout_aclk200", mpll_bpll_p, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk400", mpll_bpll_p, SRC_TOP0, 20, 1),
+};
+
+static struct samsung_div_clock exynos5410_div_clks[] __initdata = {
+ DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
+
+ DIV(0, "div_acp", "div_arm2", DIV_CPU0, 8, 3),
+ DIV(0, "div_cpud", "div_arm2", DIV_CPU0, 4, 3),
+ DIV(0, "div_atb", "div_arm2", DIV_CPU0, 16, 3),
+ DIV(0, "pclk_dbg", "div_arm2", DIV_CPU0, 20, 3),
+
+ DIV(0, "div_kfc", "mout_kfc", DIV_KFC0, 0, 3),
+ DIV(0, "div_aclk", "div_kfc", DIV_KFC0, 4, 3),
+ DIV(0, "div_pclk", "div_kfc", DIV_KFC0, 20, 3),
+
+ DIV(0, "aclk66_pre", "sclk_mpll_muxed", DIV_TOP1, 24, 3),
+ DIV(0, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3),
+
+ DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+
+ DIV_F(0, "div_mmc_pre0", "div_mmc0",
+ DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(0, "div_mmc_pre1", "div_mmc1",
+ DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(0, "div_mmc_pre2", "div_mmc2",
+ DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0),
+
+ DIV(0, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
+ DIV(0, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
+ DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
+ DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
+
+ DIV(0, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
+ DIV(0, "aclk400", "mout_aclk400", DIV_TOP0, 24, 3),
+};
+
+static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = {
+ GATE(CLK_MCT, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0),
+
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0",
+ SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1",
+ SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2",
+ SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_MMC0, "sdmmc0", "aclk200", GATE_BUS_FSYS0, 12, 0, 0),
+ GATE(CLK_MMC1, "sdmmc1", "aclk200", GATE_BUS_FSYS0, 13, 0, 0),
+ GATE(CLK_MMC2, "sdmmc2", "aclk200", GATE_BUS_FSYS0, 14, 0, 0),
+
+ GATE(CLK_UART0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0),
+
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
+ SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
+ SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
+ SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+};
+
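+/*
+ * All five PLLs are pll_35xx type; passing a NULL rate table keeps the
+ * rates programmed by the bootloader (no set_rate support).
+ */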
+static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, NULL),
+ [cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, NULL),
+ [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
+ BPLL_CON0, NULL),
+ [kpll] = PLL(pll_35xx, CLK_FOUT_KPLL, "fout_kpll", "fin_pll", KPLL_LOCK,
+ KPLL_CON0, NULL),
+};
+
+/* register exynos5410 clocks */
+static void __init exynos5410_clk_init(struct device_node *np)
+{
+ struct samsung_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+
+ samsung_clk_register_pll(ctx, exynos5410_plls,
+ ARRAY_SIZE(exynos5410_plls), reg_base);
+
+ samsung_clk_register_mux(ctx, exynos5410_mux_clks,
+ ARRAY_SIZE(exynos5410_mux_clks));
+ samsung_clk_register_div(ctx, exynos5410_div_clks,
+ ARRAY_SIZE(exynos5410_div_clks));
+ samsung_clk_register_gate(ctx, exynos5410_gate_clks,
+ ARRAY_SIZE(exynos5410_gate_clks));
+
+ pr_debug("Exynos5410: clock setup completed.\n");
+}
+CLK_OF_DECLARE(exynos5410_clk, "samsung,exynos5410-clock", exynos5410_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 60b26819bed5..9d7d7eed03fd 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -27,18 +27,24 @@
#define DIV_CPU1 0x504
#define GATE_BUS_CPU 0x700
#define GATE_SCLK_CPU 0x800
+#define CLKOUT_CMU_CPU 0xa00
+#define GATE_IP_G2D 0x8800
#define CPLL_LOCK 0x10020
#define DPLL_LOCK 0x10030
#define EPLL_LOCK 0x10040
#define RPLL_LOCK 0x10050
#define IPLL_LOCK 0x10060
#define SPLL_LOCK 0x10070
-#define VPLL_LOCK 0x10070
+#define VPLL_LOCK 0x10080
#define MPLL_LOCK 0x10090
#define CPLL_CON0 0x10120
#define DPLL_CON0 0x10128
#define EPLL_CON0 0x10130
+#define EPLL_CON1 0x10134
+#define EPLL_CON2 0x10138
#define RPLL_CON0 0x10140
+#define RPLL_CON1 0x10144
+#define RPLL_CON2 0x10148
#define IPLL_CON0 0x10150
#define SPLL_CON0 0x10160
#define VPLL_CON0 0x10170
@@ -51,21 +57,31 @@
#define SRC_TOP5 0x10214
#define SRC_TOP6 0x10218
#define SRC_TOP7 0x1021c
+#define SRC_TOP8 0x10220 /* 5800 specific */
+#define SRC_TOP9 0x10224 /* 5800 specific */
#define SRC_DISP10 0x1022c
#define SRC_MAU 0x10240
#define SRC_FSYS 0x10244
#define SRC_PERIC0 0x10250
#define SRC_PERIC1 0x10254
+#define SRC_ISP 0x10270
+#define SRC_CAM 0x10274 /* 5800 specific */
#define SRC_TOP10 0x10280
#define SRC_TOP11 0x10284
#define SRC_TOP12 0x10288
-#define SRC_MASK_DISP10 0x1032c
+#define SRC_TOP13 0x1028c /* 5800 specific */
+#define SRC_MASK_TOP2 0x10308
+#define SRC_MASK_TOP7 0x1031c
+#define SRC_MASK_DISP10 0x1032c
+#define SRC_MASK_MAU 0x10334
#define SRC_MASK_FSYS 0x10340
#define SRC_MASK_PERIC0 0x10350
#define SRC_MASK_PERIC1 0x10354
#define DIV_TOP0 0x10500
#define DIV_TOP1 0x10504
#define DIV_TOP2 0x10508
+#define DIV_TOP8 0x10520 /* 5800 specific */
+#define DIV_TOP9 0x10524 /* 5800 specific */
#define DIV_DISP10 0x1052c
#define DIV_MAU 0x10544
#define DIV_FSYS0 0x10548
@@ -76,54 +92,82 @@
#define DIV_PERIC2 0x10560
#define DIV_PERIC3 0x10564
#define DIV_PERIC4 0x10568
+#define DIV_CAM 0x10574 /* 5800 specific */
+#define SCLK_DIV_ISP0 0x10580
+#define SCLK_DIV_ISP1 0x10584
+#define DIV2_RATIO0 0x10590
+#define DIV4_RATIO 0x105a0
#define GATE_BUS_TOP 0x10700
+#define GATE_BUS_GEN 0x1073c
#define GATE_BUS_FSYS0 0x10740
+#define GATE_BUS_FSYS2 0x10748
#define GATE_BUS_PERIC 0x10750
#define GATE_BUS_PERIC1 0x10754
#define GATE_BUS_PERIS0 0x10760
#define GATE_BUS_PERIS1 0x10764
+#define GATE_BUS_NOC 0x10770
+#define GATE_TOP_SCLK_ISP 0x10870
#define GATE_IP_GSCL0 0x10910
#define GATE_IP_GSCL1 0x10920
+#define GATE_IP_CAM 0x10924 /* 5800 specific */
#define GATE_IP_MFC 0x1092c
#define GATE_IP_DISP1 0x10928
#define GATE_IP_G3D 0x10930
#define GATE_IP_GEN 0x10934
+#define GATE_IP_FSYS 0x10944
+#define GATE_IP_PERIC 0x10950
+#define GATE_IP_PERIS 0x10960
#define GATE_IP_MSCL 0x10970
#define GATE_TOP_SCLK_GSCL 0x10820
#define GATE_TOP_SCLK_DISP1 0x10828
#define GATE_TOP_SCLK_MAU 0x1083c
#define GATE_TOP_SCLK_FSYS 0x10840
#define GATE_TOP_SCLK_PERIC 0x10850
+#define TOP_SPARE2 0x10b08
#define BPLL_LOCK 0x20010
#define BPLL_CON0 0x20110
-#define SRC_CDREX 0x20200
#define KPLL_LOCK 0x28000
#define KPLL_CON0 0x28100
#define SRC_KFC 0x28200
#define DIV_KFC0 0x28500
+/* Exynos5x SoC type */
+enum exynos5x_soc {
+ EXYNOS5420,
+ EXYNOS5800,
+};
+
/* list of PLLs */
-enum exynos5420_plls {
+enum exynos5x_plls {
apll, cpll, dpll, epll, rpll, ipll, spll, vpll, mpll,
bpll, kpll,
nr_plls /* number of PLLs */
};
static void __iomem *reg_base;
+static enum exynos5x_soc exynos5x_soc;
#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos5420_save;
+static struct samsung_clk_reg_dump *exynos5x_save;
+static struct samsung_clk_reg_dump *exynos5800_save;
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static unsigned long exynos5420_clk_regs[] __initdata = {
+static unsigned long exynos5x_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
DIV_CPU1,
GATE_BUS_CPU,
GATE_SCLK_CPU,
+ CLKOUT_CMU_CPU,
+ EPLL_CON0,
+ EPLL_CON1,
+ EPLL_CON2,
+ RPLL_CON0,
+ RPLL_CON1,
+ RPLL_CON2,
SRC_TOP0,
SRC_TOP1,
SRC_TOP2,
@@ -140,10 +184,13 @@ static unsigned long exynos5420_clk_regs[] __initdata = {
SRC_TOP10,
SRC_TOP11,
SRC_TOP12,
+ SRC_MASK_TOP2,
+ SRC_MASK_TOP7,
SRC_MASK_DISP10,
SRC_MASK_FSYS,
SRC_MASK_PERIC0,
SRC_MASK_PERIC1,
+ SRC_ISP,
DIV_TOP0,
DIV_TOP1,
DIV_TOP2,
@@ -157,41 +204,71 @@ static unsigned long exynos5420_clk_regs[] __initdata = {
DIV_PERIC2,
DIV_PERIC3,
DIV_PERIC4,
+ SCLK_DIV_ISP0,
+ SCLK_DIV_ISP1,
+ DIV2_RATIO0,
+ DIV4_RATIO,
GATE_BUS_TOP,
+ GATE_BUS_GEN,
GATE_BUS_FSYS0,
+ GATE_BUS_FSYS2,
GATE_BUS_PERIC,
GATE_BUS_PERIC1,
GATE_BUS_PERIS0,
GATE_BUS_PERIS1,
+ GATE_BUS_NOC,
+ GATE_TOP_SCLK_ISP,
GATE_IP_GSCL0,
GATE_IP_GSCL1,
GATE_IP_MFC,
GATE_IP_DISP1,
GATE_IP_G3D,
GATE_IP_GEN,
+ GATE_IP_FSYS,
+ GATE_IP_PERIC,
+ GATE_IP_PERIS,
GATE_IP_MSCL,
GATE_TOP_SCLK_GSCL,
GATE_TOP_SCLK_DISP1,
GATE_TOP_SCLK_MAU,
GATE_TOP_SCLK_FSYS,
GATE_TOP_SCLK_PERIC,
- SRC_CDREX,
+ TOP_SPARE2,
SRC_KFC,
DIV_KFC0,
};
+static unsigned long exynos5800_clk_regs[] __initdata = {
+ SRC_TOP8,
+ SRC_TOP9,
+ SRC_CAM,
+ SRC_TOP1,
+ DIV_TOP8,
+ DIV_TOP9,
+ DIV_CAM,
+ GATE_IP_CAM,
+};
+
static int exynos5420_clk_suspend(void)
{
- samsung_clk_save(reg_base, exynos5420_save,
- ARRAY_SIZE(exynos5420_clk_regs));
+ samsung_clk_save(reg_base, exynos5x_save,
+ ARRAY_SIZE(exynos5x_clk_regs));
+
+ if (exynos5x_soc == EXYNOS5800)
+ samsung_clk_save(reg_base, exynos5800_save,
+ ARRAY_SIZE(exynos5800_clk_regs));
return 0;
}
static void exynos5420_clk_resume(void)
{
- samsung_clk_restore(reg_base, exynos5420_save,
- ARRAY_SIZE(exynos5420_clk_regs));
+ samsung_clk_restore(reg_base, exynos5x_save,
+ ARRAY_SIZE(exynos5x_clk_regs));
+
+ if (exynos5x_soc == EXYNOS5800)
+ samsung_clk_restore(reg_base, exynos5800_save,
+ ARRAY_SIZE(exynos5800_clk_regs));
}
static struct syscore_ops exynos5420_clk_syscore_ops = {
@@ -201,108 +278,183 @@ static struct syscore_ops exynos5420_clk_syscore_ops = {
static void exynos5420_clk_sleep_init(void)
{
- exynos5420_save = samsung_clk_alloc_reg_dump(exynos5420_clk_regs,
- ARRAY_SIZE(exynos5420_clk_regs));
- if (!exynos5420_save) {
+ exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs,
+ ARRAY_SIZE(exynos5x_clk_regs));
+ if (!exynos5x_save) {
pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
__func__);
return;
}
+ if (exynos5x_soc == EXYNOS5800) {
+ exynos5800_save =
+ samsung_clk_alloc_reg_dump(exynos5800_clk_regs,
+ ARRAY_SIZE(exynos5800_clk_regs));
+ if (!exynos5800_save)
+ goto err_soc;
+ }
+
register_syscore_ops(&exynos5420_clk_syscore_ops);
+ return;
+err_soc:
+ kfree(exynos5x_save);
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
}
#else
static void exynos5420_clk_sleep_init(void) {}
#endif
/* list of all parent clocks */
-PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll",
- "sclk_mpll", "sclk_spll" };
-PNAME(cpu_p) = { "mout_apll" , "mout_mspll_cpu" };
-PNAME(kfc_p) = { "mout_kpll" , "mout_mspll_kfc" };
-PNAME(apll_p) = { "fin_pll", "fout_apll", };
-PNAME(bpll_p) = { "fin_pll", "fout_bpll", };
-PNAME(cpll_p) = { "fin_pll", "fout_cpll", };
-PNAME(dpll_p) = { "fin_pll", "fout_dpll", };
-PNAME(epll_p) = { "fin_pll", "fout_epll", };
-PNAME(ipll_p) = { "fin_pll", "fout_ipll", };
-PNAME(kpll_p) = { "fin_pll", "fout_kpll", };
-PNAME(mpll_p) = { "fin_pll", "fout_mpll", };
-PNAME(rpll_p) = { "fin_pll", "fout_rpll", };
-PNAME(spll_p) = { "fin_pll", "fout_spll", };
-PNAME(vpll_p) = { "fin_pll", "fout_vpll", };
-
-PNAME(group1_p) = { "sclk_cpll", "sclk_dpll", "sclk_mpll" };
-PNAME(group2_p) = { "fin_pll", "sclk_cpll", "sclk_dpll", "sclk_mpll",
- "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(group3_p) = { "sclk_rpll", "sclk_spll" };
-PNAME(group4_p) = { "sclk_ipll", "sclk_dpll", "sclk_mpll" };
-PNAME(group5_p) = { "sclk_vpll", "sclk_dpll" };
-
-PNAME(sw_aclk66_p) = { "dout_aclk66", "sclk_spll" };
-PNAME(aclk66_peric_p) = { "fin_pll", "mout_sw_aclk66" };
-
-PNAME(sw_aclk200_fsys_p) = { "dout_aclk200_fsys", "sclk_spll"};
-PNAME(user_aclk200_fsys_p) = { "fin_pll", "mout_sw_aclk200_fsys" };
-
-PNAME(sw_aclk200_fsys2_p) = { "dout_aclk200_fsys2", "sclk_spll"};
-PNAME(user_aclk200_fsys2_p) = { "fin_pll", "mout_sw_aclk200_fsys2" };
-
-PNAME(sw_aclk200_p) = { "dout_aclk200", "sclk_spll"};
-PNAME(aclk200_disp1_p) = { "fin_pll", "mout_sw_aclk200" };
-
-PNAME(sw_aclk400_mscl_p) = { "dout_aclk400_mscl", "sclk_spll"};
-PNAME(user_aclk400_mscl_p) = { "fin_pll", "mout_sw_aclk400_mscl" };
-
-PNAME(sw_aclk333_p) = { "dout_aclk333", "sclk_spll"};
-PNAME(user_aclk333_p) = { "fin_pll", "mout_sw_aclk333" };
-
-PNAME(sw_aclk166_p) = { "dout_aclk166", "sclk_spll"};
-PNAME(user_aclk166_p) = { "fin_pll", "mout_sw_aclk166" };
-
-PNAME(sw_aclk266_p) = { "dout_aclk266", "sclk_spll"};
-PNAME(user_aclk266_p) = { "fin_pll", "mout_sw_aclk266" };
-
-PNAME(sw_aclk333_432_gscl_p) = { "dout_aclk333_432_gscl", "sclk_spll"};
-PNAME(user_aclk333_432_gscl_p) = { "fin_pll", "mout_sw_aclk333_432_gscl" };
-
-PNAME(sw_aclk300_gscl_p) = { "dout_aclk300_gscl", "sclk_spll"};
-PNAME(user_aclk300_gscl_p) = { "fin_pll", "mout_sw_aclk300_gscl" };
-
-PNAME(sw_aclk300_disp1_p) = { "dout_aclk300_disp1", "sclk_spll"};
-PNAME(user_aclk300_disp1_p) = { "fin_pll", "mout_sw_aclk300_disp1" };
-
-PNAME(sw_aclk300_jpeg_p) = { "dout_aclk300_jpeg", "sclk_spll"};
-PNAME(user_aclk300_jpeg_p) = { "fin_pll", "mout_sw_aclk300_jpeg" };
-
-PNAME(sw_aclk_g3d_p) = { "dout_aclk_g3d", "sclk_spll"};
-PNAME(user_aclk_g3d_p) = { "fin_pll", "mout_sw_aclk_g3d" };
-
-PNAME(sw_aclk266_g2d_p) = { "dout_aclk266_g2d", "sclk_spll"};
-PNAME(user_aclk266_g2d_p) = { "fin_pll", "mout_sw_aclk266_g2d" };
-
-PNAME(sw_aclk333_g2d_p) = { "dout_aclk333_g2d", "sclk_spll"};
-PNAME(user_aclk333_g2d_p) = { "fin_pll", "mout_sw_aclk333_g2d" };
-
-PNAME(audio0_p) = { "fin_pll", "cdclk0", "sclk_dpll", "sclk_mpll",
- "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(audio1_p) = { "fin_pll", "cdclk1", "sclk_dpll", "sclk_mpll",
- "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(audio2_p) = { "fin_pll", "cdclk2", "sclk_dpll", "sclk_mpll",
- "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(spdif_p) = { "fin_pll", "dout_audio0", "dout_audio1", "dout_audio2",
- "spdif_extclk", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(hdmi_p) = { "dout_hdmi_pixel", "sclk_hdmiphy" };
-PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
- "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
+PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll"};
+PNAME(mout_cpu_p) = {"mout_apll" , "mout_mspll_cpu"};
+PNAME(mout_kfc_p) = {"mout_kpll" , "mout_mspll_kfc"};
+PNAME(mout_apll_p) = {"fin_pll", "fout_apll"};
+PNAME(mout_bpll_p) = {"fin_pll", "fout_bpll"};
+PNAME(mout_cpll_p) = {"fin_pll", "fout_cpll"};
+PNAME(mout_dpll_p) = {"fin_pll", "fout_dpll"};
+PNAME(mout_epll_p) = {"fin_pll", "fout_epll"};
+PNAME(mout_ipll_p) = {"fin_pll", "fout_ipll"};
+PNAME(mout_kpll_p) = {"fin_pll", "fout_kpll"};
+PNAME(mout_mpll_p) = {"fin_pll", "fout_mpll"};
+PNAME(mout_rpll_p) = {"fin_pll", "fout_rpll"};
+PNAME(mout_spll_p) = {"fin_pll", "fout_spll"};
+PNAME(mout_vpll_p) = {"fin_pll", "fout_vpll"};
+
+PNAME(mout_group1_p) = {"mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll"};
+PNAME(mout_group2_p) = {"fin_pll", "mout_sclk_cpll",
+ "mout_sclk_dpll", "mout_sclk_mpll", "mout_sclk_spll",
+ "mout_sclk_ipll", "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_group3_p) = {"mout_sclk_rpll", "mout_sclk_spll"};
+PNAME(mout_group4_p) = {"mout_sclk_ipll", "mout_sclk_dpll", "mout_sclk_mpll"};
+PNAME(mout_group5_p) = {"mout_sclk_vpll", "mout_sclk_dpll"};
+
+PNAME(mout_fimd1_final_p) = {"mout_fimd1", "mout_fimd1_opt"};
+PNAME(mout_sw_aclk66_p) = {"dout_aclk66", "mout_sclk_spll"};
+PNAME(mout_user_aclk66_peric_p) = { "fin_pll", "mout_sw_aclk66"};
+PNAME(mout_user_pclk66_gpio_p) = {"mout_sw_aclk66", "ff_sw_aclk66"};
+
+PNAME(mout_sw_aclk200_fsys_p) = {"dout_aclk200_fsys", "mout_sclk_spll"};
+PNAME(mout_sw_pclk200_fsys_p) = {"dout_pclk200_fsys", "mout_sclk_spll"};
+PNAME(mout_user_pclk200_fsys_p) = {"fin_pll", "mout_sw_pclk200_fsys"};
+PNAME(mout_user_aclk200_fsys_p) = {"fin_pll", "mout_sw_aclk200_fsys"};
+
+PNAME(mout_sw_aclk200_fsys2_p) = {"dout_aclk200_fsys2", "mout_sclk_spll"};
+PNAME(mout_user_aclk200_fsys2_p) = {"fin_pll", "mout_sw_aclk200_fsys2"};
+PNAME(mout_sw_aclk100_noc_p) = {"dout_aclk100_noc", "mout_sclk_spll"};
+PNAME(mout_user_aclk100_noc_p) = {"fin_pll", "mout_sw_aclk100_noc"};
+
+PNAME(mout_sw_aclk400_wcore_p) = {"dout_aclk400_wcore", "mout_sclk_spll"};
+PNAME(mout_aclk400_wcore_bpll_p) = {"mout_aclk400_wcore", "sclk_bpll"};
+PNAME(mout_user_aclk400_wcore_p) = {"fin_pll", "mout_sw_aclk400_wcore"};
+
+PNAME(mout_sw_aclk400_isp_p) = {"dout_aclk400_isp", "mout_sclk_spll"};
+PNAME(mout_user_aclk400_isp_p) = {"fin_pll", "mout_sw_aclk400_isp"};
+
+PNAME(mout_sw_aclk333_432_isp0_p) = {"dout_aclk333_432_isp0",
+ "mout_sclk_spll"};
+PNAME(mout_user_aclk333_432_isp0_p) = {"fin_pll", "mout_sw_aclk333_432_isp0"};
+
+PNAME(mout_sw_aclk333_432_isp_p) = {"dout_aclk333_432_isp", "mout_sclk_spll"};
+PNAME(mout_user_aclk333_432_isp_p) = {"fin_pll", "mout_sw_aclk333_432_isp"};
+
+PNAME(mout_sw_aclk200_p) = {"dout_aclk200", "mout_sclk_spll"};
+PNAME(mout_user_aclk200_disp1_p) = {"fin_pll", "mout_sw_aclk200"};
+
+PNAME(mout_sw_aclk400_mscl_p) = {"dout_aclk400_mscl", "mout_sclk_spll"};
+PNAME(mout_user_aclk400_mscl_p) = {"fin_pll", "mout_sw_aclk400_mscl"};
+
+PNAME(mout_sw_aclk333_p) = {"dout_aclk333", "mout_sclk_spll"};
+PNAME(mout_user_aclk333_p) = {"fin_pll", "mout_sw_aclk333"};
+
+PNAME(mout_sw_aclk166_p) = {"dout_aclk166", "mout_sclk_spll"};
+PNAME(mout_user_aclk166_p) = {"fin_pll", "mout_sw_aclk166"};
+
+PNAME(mout_sw_aclk266_p) = {"dout_aclk266", "mout_sclk_spll"};
+PNAME(mout_user_aclk266_p) = {"fin_pll", "mout_sw_aclk266"};
+PNAME(mout_user_aclk266_isp_p) = {"fin_pll", "mout_sw_aclk266"};
+
+PNAME(mout_sw_aclk333_432_gscl_p) = {"dout_aclk333_432_gscl", "mout_sclk_spll"};
+PNAME(mout_user_aclk333_432_gscl_p) = {"fin_pll", "mout_sw_aclk333_432_gscl"};
+
+PNAME(mout_sw_aclk300_gscl_p) = {"dout_aclk300_gscl", "mout_sclk_spll"};
+PNAME(mout_user_aclk300_gscl_p) = {"fin_pll", "mout_sw_aclk300_gscl"};
+
+PNAME(mout_sw_aclk300_disp1_p) = {"dout_aclk300_disp1", "mout_sclk_spll"};
+PNAME(mout_sw_aclk400_disp1_p) = {"dout_aclk400_disp1", "mout_sclk_spll"};
+PNAME(mout_user_aclk300_disp1_p) = {"fin_pll", "mout_sw_aclk300_disp1"};
+PNAME(mout_user_aclk400_disp1_p) = {"fin_pll", "mout_sw_aclk400_disp1"};
+
+PNAME(mout_sw_aclk300_jpeg_p) = {"dout_aclk300_jpeg", "mout_sclk_spll"};
+PNAME(mout_user_aclk300_jpeg_p) = {"fin_pll", "mout_sw_aclk300_jpeg"};
+
+PNAME(mout_sw_aclk_g3d_p) = {"dout_aclk_g3d", "mout_sclk_spll"};
+PNAME(mout_user_aclk_g3d_p) = {"fin_pll", "mout_sw_aclk_g3d"};
+
+PNAME(mout_sw_aclk266_g2d_p) = {"dout_aclk266_g2d", "mout_sclk_spll"};
+PNAME(mout_user_aclk266_g2d_p) = {"fin_pll", "mout_sw_aclk266_g2d"};
+
+PNAME(mout_sw_aclk333_g2d_p) = {"dout_aclk333_g2d", "mout_sclk_spll"};
+PNAME(mout_user_aclk333_g2d_p) = {"fin_pll", "mout_sw_aclk333_g2d"};
+
+PNAME(mout_audio0_p) = {"fin_pll", "cdclk0", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll",
+ "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_audio1_p) = {"fin_pll", "cdclk1", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll",
+ "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_audio2_p) = {"fin_pll", "cdclk2", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll",
+ "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_spdif_p) = {"fin_pll", "dout_audio0", "dout_audio1",
+ "dout_audio2", "spdif_extclk", "mout_sclk_ipll",
+ "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_hdmi_p) = {"dout_hdmi_pixel", "sclk_hdmiphy"};
+PNAME(mout_maudio0_p) = {"fin_pll", "maudio_clk", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll",
+ "mout_sclk_epll", "mout_sclk_rpll"};
+PNAME(mout_mau_epll_clk_p) = {"mout_sclk_epll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll"};
+/* List of parents specific to exynos5800 */
+PNAME(mout_epll2_5800_p) = { "mout_sclk_epll", "ff_dout_epll2" };
+PNAME(mout_group1_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "ff_dout_spll2" };
+PNAME(mout_group2_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "ff_dout_spll2",
+ "mout_epll2", "mout_sclk_ipll" };
+PNAME(mout_group3_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "ff_dout_spll2",
+ "mout_epll2" };
+PNAME(mout_group5_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll" };
+PNAME(mout_group6_5800_p) = { "mout_sclk_ipll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "ff_dout_spll2" };
+PNAME(mout_group7_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "mout_sclk_spll",
+ "mout_epll2", "mout_sclk_ipll" };
+PNAME(mout_mau_epll_clk_5800_p) = { "mout_sclk_epll", "mout_sclk_dpll",
+ "mout_sclk_mpll",
+ "ff_dout_spll2" };
+PNAME(mout_group8_5800_p) = { "dout_aclk432_scaler", "dout_sclk_sw" };
+PNAME(mout_group9_5800_p) = { "dout_osc_div", "mout_sw_aclk432_scaler" };
+PNAME(mout_group10_5800_p) = { "dout_aclk432_cam", "dout_sclk_sw" };
+PNAME(mout_group11_5800_p) = { "dout_osc_div", "mout_sw_aclk432_cam" };
+PNAME(mout_group12_5800_p) = { "dout_aclkfl1_550_cam", "dout_sclk_sw" };
+PNAME(mout_group13_5800_p) = { "dout_osc_div", "mout_sw_aclkfl1_550_cam" };
+PNAME(mout_group14_5800_p) = { "dout_aclk550_cam", "dout_sclk_sw" };
+PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" };
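+
+/*
+ * The "sw" and "user" muxes above form the usual Exynos re-parenting
+ * scheme: the user mux can fall back to fin_pll while the upstream
+ * dividers and PLLs are reprogrammed, avoiding glitches.
+ */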
/* fixed rate clocks generated outside the soc */
-static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock
+ exynos5x_fixed_rate_ext_clks[] __initdata = {
FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = {
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
@@ -310,146 +462,309 @@ static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata =
FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
};
-static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
- FFACTOR(0, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
+static struct samsung_fixed_factor_clock
+ exynos5x_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "ff_hsic_12m", "fin_pll", 1, 2, 0),
+ FFACTOR(0, "ff_sw_aclk66", "mout_sw_aclk66", 1, 2, 0),
};
-static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
- MUX(0, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
- MUX(0, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
- MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1),
- MUX(0, "mout_cpu", cpu_p, SRC_CPU, 16, 1),
- MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
- MUX(0, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1),
-
- MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
-
- MUX_A(0, "mout_aclk400_mscl", group1_p,
- SRC_TOP0, 4, 2, "aclk400_mscl"),
- MUX(0, "mout_aclk200", group1_p, SRC_TOP0, 8, 2),
- MUX(0, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2),
- MUX(0, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2),
-
- MUX(0, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2),
- MUX(0, "mout_aclk66", group1_p, SRC_TOP1, 8, 2),
- MUX(0, "mout_aclk266", group1_p, SRC_TOP1, 20, 2),
- MUX(0, "mout_aclk166", group1_p, SRC_TOP1, 24, 2),
- MUX(0, "mout_aclk333", group1_p, SRC_TOP1, 28, 2),
-
- MUX(0, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2),
- MUX(0, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2),
- MUX(0, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1),
- MUX(0, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2),
- MUX(0, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2),
- MUX(0, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2),
-
- MUX(0, "mout_user_aclk400_mscl", user_aclk400_mscl_p,
+static struct samsung_fixed_factor_clock
+ exynos5800_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "ff_dout_epll2", "mout_sclk_epll", 1, 2, 0),
+ FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0),
+};
+
+static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk400_isp", mout_group3_5800_p, SRC_TOP0, 0, 3),
+ MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3),
+ MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, SRC_TOP0, 16, 3),
+ MUX(0, "mout_aclk100_noc", mout_group1_5800_p, SRC_TOP0, 20, 2),
+
+ MUX(0, "mout_aclk333_432_gscl", mout_group6_5800_p, SRC_TOP1, 0, 2),
+ MUX(0, "mout_aclk333_432_isp", mout_group6_5800_p, SRC_TOP1, 4, 2),
+ MUX(0, "mout_aclk333_432_isp0", mout_group6_5800_p, SRC_TOP1, 12, 2),
+ MUX(0, "mout_aclk266", mout_group5_5800_p, SRC_TOP1, 20, 2),
+ MUX(0, "mout_aclk333", mout_group1_5800_p, SRC_TOP1, 28, 2),
+
+ MUX(0, "mout_aclk400_disp1", mout_group7_5800_p, SRC_TOP2, 4, 3),
+ MUX(0, "mout_aclk333_g2d", mout_group5_5800_p, SRC_TOP2, 8, 2),
+ MUX(0, "mout_aclk266_g2d", mout_group5_5800_p, SRC_TOP2, 12, 2),
+ MUX(0, "mout_aclk300_jpeg", mout_group5_5800_p, SRC_TOP2, 20, 2),
+ MUX(0, "mout_aclk300_disp1", mout_group5_5800_p, SRC_TOP2, 24, 2),
+ MUX(0, "mout_aclk300_gscl", mout_group5_5800_p, SRC_TOP2, 28, 2),
+
+ MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, SRC_TOP7,
+ 20, 2),
+ MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1),
+ MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1),
+
+ MUX(0, "mout_aclk550_cam", mout_group3_5800_p, SRC_TOP8, 16, 3),
+ MUX(0, "mout_aclkfl1_550_cam", mout_group3_5800_p, SRC_TOP8, 20, 3),
+ MUX(0, "mout_aclk432_cam", mout_group6_5800_p, SRC_TOP8, 24, 2),
+ MUX(0, "mout_aclk432_scaler", mout_group6_5800_p, SRC_TOP8, 28, 2),
+
+ MUX(0, "mout_user_aclk550_cam", mout_group15_5800_p,
+ SRC_TOP9, 16, 1),
+ MUX(0, "mout_user_aclkfl1_550_cam", mout_group13_5800_p,
+ SRC_TOP9, 20, 1),
+ MUX(0, "mout_user_aclk432_cam", mout_group11_5800_p,
+ SRC_TOP9, 24, 1),
+ MUX(0, "mout_user_aclk432_scaler", mout_group9_5800_p,
+ SRC_TOP9, 28, 1),
+
+ MUX(0, "mout_sw_aclk550_cam", mout_group14_5800_p, SRC_TOP13, 16, 1),
+ MUX(0, "mout_sw_aclkfl1_550_cam", mout_group12_5800_p,
+ SRC_TOP13, 20, 1),
+ MUX(0, "mout_sw_aclk432_cam", mout_group10_5800_p,
+ SRC_TOP13, 24, 1),
+ MUX(0, "mout_sw_aclk432_scaler", mout_group8_5800_p,
+ SRC_TOP13, 28, 1),
+
+ MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3),
+};
+
+static struct samsung_div_clock exynos5800_div_clks[] __initdata = {
+ DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3),
+
+ DIV(0, "dout_aclk550_cam", "mout_aclk550_cam",
+ DIV_TOP8, 16, 3),
+ DIV(0, "dout_aclkfl1_550_cam", "mout_aclkfl1_550_cam",
+ DIV_TOP8, 20, 3),
+ DIV(0, "dout_aclk432_cam", "mout_aclk432_cam",
+ DIV_TOP8, 24, 3),
+ DIV(0, "dout_aclk432_scaler", "mout_aclk432_scaler",
+ DIV_TOP8, 28, 3),
+
+ DIV(0, "dout_osc_div", "fin_pll", DIV_TOP9, 20, 3),
+ DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6),
+};
+
+static struct samsung_gate_clock exynos5800_gate_clks[] __initdata = {
+ GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
+ GATE_BUS_TOP, 24, 0, 0),
+ GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
+ GATE_BUS_TOP, 27, 0, 0),
+};
+
+static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+ MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1),
+ MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p,
+ TOP_SPARE2, 4, 1),
+
+ MUX(0, "mout_aclk400_isp", mout_group1_p, SRC_TOP0, 0, 2),
+ MUX_A(0, "mout_aclk400_mscl", mout_group1_p,
+ SRC_TOP0, 4, 2, "aclk400_mscl"),
+ MUX(0, "mout_aclk400_wcore", mout_group1_p, SRC_TOP0, 16, 2),
+ MUX(0, "mout_aclk100_noc", mout_group1_p, SRC_TOP0, 20, 2),
+
+ MUX(0, "mout_aclk333_432_gscl", mout_group4_p, SRC_TOP1, 0, 2),
+ MUX(0, "mout_aclk333_432_isp", mout_group4_p,
+ SRC_TOP1, 4, 2),
+ MUX(0, "mout_aclk333_432_isp0", mout_group4_p, SRC_TOP1, 12, 2),
+ MUX(0, "mout_aclk266", mout_group1_p, SRC_TOP1, 20, 2),
+ MUX(0, "mout_aclk333", mout_group1_p, SRC_TOP1, 28, 2),
+
+ MUX(0, "mout_aclk400_disp1", mout_group1_p, SRC_TOP2, 4, 2),
+ MUX(0, "mout_aclk333_g2d", mout_group1_p, SRC_TOP2, 8, 2),
+ MUX(0, "mout_aclk266_g2d", mout_group1_p, SRC_TOP2, 12, 2),
+ MUX(0, "mout_aclk300_jpeg", mout_group1_p, SRC_TOP2, 20, 2),
+ MUX(0, "mout_aclk300_disp1", mout_group1_p, SRC_TOP2, 24, 2),
+ MUX(0, "mout_aclk300_gscl", mout_group1_p, SRC_TOP2, 28, 2),
+
+ MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2),
+
+ MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
+};
+
+static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+ DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll",
+ DIV_TOP0, 16, 3),
+};
+
+static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
+ MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p,
+ SRC_TOP7, 4, 1),
+ MUX(0, "mout_mspll_kfc", mout_mspll_cpu_p, SRC_TOP7, 8, 2),
+ MUX(0, "mout_mspll_cpu", mout_mspll_cpu_p, SRC_TOP7, 12, 2),
+
+ MUX(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+ MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+ MUX(0, "mout_kpll", mout_kpll_p, SRC_KFC, 0, 1),
+ MUX(0, "mout_kfc", mout_kfc_p, SRC_KFC, 16, 1),
+
+ MUX(0, "mout_aclk200", mout_group1_p, SRC_TOP0, 8, 2),
+ MUX(0, "mout_aclk200_fsys2", mout_group1_p, SRC_TOP0, 12, 2),
+ MUX(0, "mout_pclk200_fsys", mout_group1_p, SRC_TOP0, 24, 2),
+ MUX(0, "mout_aclk200_fsys", mout_group1_p, SRC_TOP0, 28, 2),
+
+ MUX(0, "mout_aclk66", mout_group1_p, SRC_TOP1, 8, 2),
+ MUX(0, "mout_aclk166", mout_group1_p, SRC_TOP1, 24, 2),
+
+ MUX(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1),
+
+ MUX(0, "mout_user_aclk400_isp", mout_user_aclk400_isp_p,
+ SRC_TOP3, 0, 1),
+ MUX(0, "mout_user_aclk400_mscl", mout_user_aclk400_mscl_p,
SRC_TOP3, 4, 1),
- MUX_A(0, "mout_aclk200_disp1", aclk200_disp1_p,
- SRC_TOP3, 8, 1, "aclk200_disp1"),
- MUX(0, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p,
+ MUX(0, "mout_user_aclk200_disp1", mout_user_aclk200_disp1_p,
+ SRC_TOP3, 8, 1),
+ MUX(0, "mout_user_aclk200_fsys2", mout_user_aclk200_fsys2_p,
SRC_TOP3, 12, 1),
- MUX(0, "mout_user_aclk200_fsys", user_aclk200_fsys_p,
+ MUX(0, "mout_user_aclk400_wcore", mout_user_aclk400_wcore_p,
+ SRC_TOP3, 16, 1),
+ MUX(0, "mout_user_aclk100_noc", mout_user_aclk100_noc_p,
+ SRC_TOP3, 20, 1),
+ MUX(0, "mout_user_pclk200_fsys", mout_user_pclk200_fsys_p,
+ SRC_TOP3, 24, 1),
+ MUX(0, "mout_user_aclk200_fsys", mout_user_aclk200_fsys_p,
SRC_TOP3, 28, 1),
- MUX(0, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p,
+ MUX(0, "mout_user_aclk333_432_gscl", mout_user_aclk333_432_gscl_p,
SRC_TOP4, 0, 1),
- MUX(0, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1),
- MUX(0, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1),
- MUX(0, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1),
- MUX(0, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1),
-
- MUX(0, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1),
- MUX(0, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1),
- MUX(0, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1),
- MUX_A(0, "mout_user_aclk_g3d", user_aclk_g3d_p,
- SRC_TOP5, 16, 1, "aclkg3d"),
- MUX(0, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p,
+ MUX(0, "mout_user_aclk333_432_isp", mout_user_aclk333_432_isp_p,
+ SRC_TOP4, 4, 1),
+ MUX(0, "mout_user_aclk66_peric", mout_user_aclk66_peric_p,
+ SRC_TOP4, 8, 1),
+ MUX(0, "mout_user_aclk333_432_isp0", mout_user_aclk333_432_isp0_p,
+ SRC_TOP4, 12, 1),
+ MUX(0, "mout_user_aclk266_isp", mout_user_aclk266_isp_p,
+ SRC_TOP4, 16, 1),
+ MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
+ MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
+ MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1),
+
+ MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
+ SRC_TOP5, 0, 1),
+ MUX(0, "mout_user_aclk66_psgen", mout_user_aclk66_peric_p,
+ SRC_TOP5, 4, 1),
+ MUX(0, "mout_user_aclk333_g2d", mout_user_aclk333_g2d_p,
+ SRC_TOP5, 8, 1),
+ MUX(0, "mout_user_aclk266_g2d", mout_user_aclk266_g2d_p,
+ SRC_TOP5, 12, 1),
+ MUX(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p,
+ SRC_TOP5, 16, 1),
+ MUX(0, "mout_user_aclk300_jpeg", mout_user_aclk300_jpeg_p,
SRC_TOP5, 20, 1),
- MUX(0, "mout_user_aclk300_disp1", user_aclk300_disp1_p,
+ MUX(0, "mout_user_aclk300_disp1", mout_user_aclk300_disp1_p,
SRC_TOP5, 24, 1),
- MUX(0, "mout_user_aclk300_gscl", user_aclk300_gscl_p,
+ MUX(0, "mout_user_aclk300_gscl", mout_user_aclk300_gscl_p,
SRC_TOP5, 28, 1),
- MUX(0, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1),
- MUX(0, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1),
- MUX(0, "sclk_spll", spll_p, SRC_TOP6, 8, 1),
- MUX(0, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1),
- MUX(0, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1),
- MUX(0, "sclk_epll", epll_p, SRC_TOP6, 20, 1),
- MUX(0, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1),
- MUX(0, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1),
-
- MUX(0, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1),
- MUX(0, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1),
- MUX(0, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p,
+ MUX(0, "mout_sclk_mpll", mout_mpll_p, SRC_TOP6, 0, 1),
+ MUX(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1),
+ MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1),
+ MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1),
+ MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1),
+ MUX(0, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1),
+ MUX(0, "mout_sclk_dpll", mout_dpll_p, SRC_TOP6, 24, 1),
+ MUX(0, "mout_sclk_cpll", mout_cpll_p, SRC_TOP6, 28, 1),
+
+ MUX(0, "mout_sw_aclk400_isp", mout_sw_aclk400_isp_p,
+ SRC_TOP10, 0, 1),
+ MUX(0, "mout_sw_aclk400_mscl", mout_sw_aclk400_mscl_p,
+ SRC_TOP10, 4, 1),
+ MUX(0, "mout_sw_aclk200", mout_sw_aclk200_p, SRC_TOP10, 8, 1),
+ MUX(0, "mout_sw_aclk200_fsys2", mout_sw_aclk200_fsys2_p,
SRC_TOP10, 12, 1),
- MUX(0, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1),
-
- MUX(0, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p,
+ MUX(0, "mout_sw_aclk400_wcore", mout_sw_aclk400_wcore_p,
+ SRC_TOP10, 16, 1),
+ MUX(0, "mout_sw_aclk100_noc", mout_sw_aclk100_noc_p,
+ SRC_TOP10, 20, 1),
+ MUX(0, "mout_sw_pclk200_fsys", mout_sw_pclk200_fsys_p,
+ SRC_TOP10, 24, 1),
+ MUX(0, "mout_sw_aclk200_fsys", mout_sw_aclk200_fsys_p,
+ SRC_TOP10, 28, 1),
+
+ MUX(0, "mout_sw_aclk333_432_gscl", mout_sw_aclk333_432_gscl_p,
SRC_TOP11, 0, 1),
- MUX(0, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1),
- MUX(0, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1),
- MUX(0, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1),
- MUX(0, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1),
-
- MUX(0, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1),
- MUX(0, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1),
- MUX(0, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1),
- MUX(0, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1),
- MUX(0, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p,
+ MUX(0, "mout_sw_aclk333_432_isp", mout_sw_aclk333_432_isp_p,
+ SRC_TOP11, 4, 1),
+ MUX(0, "mout_sw_aclk66", mout_sw_aclk66_p, SRC_TOP11, 8, 1),
+ MUX(0, "mout_sw_aclk333_432_isp0", mout_sw_aclk333_432_isp0_p,
+ SRC_TOP11, 12, 1),
+ MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
+ MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
+ MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1),
+
+ MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
+ SRC_TOP12, 4, 1),
+ MUX(0, "mout_sw_aclk333_g2d", mout_sw_aclk333_g2d_p,
+ SRC_TOP12, 8, 1),
+ MUX(0, "mout_sw_aclk266_g2d", mout_sw_aclk266_g2d_p,
+ SRC_TOP12, 12, 1),
+ MUX(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1),
+ MUX(0, "mout_sw_aclk300_jpeg", mout_sw_aclk300_jpeg_p,
+ SRC_TOP12, 20, 1),
+ MUX(0, "mout_sw_aclk300_disp1", mout_sw_aclk300_disp1_p,
SRC_TOP12, 24, 1),
- MUX(0, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1),
+ MUX(0, "mout_sw_aclk300_gscl", mout_sw_aclk300_gscl_p,
+ SRC_TOP12, 28, 1),
/* DISP1 Block */
- MUX(0, "mout_fimd1", group3_p, SRC_DISP10, 4, 1),
- MUX(0, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
- MUX(0, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
- MUX(0, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
- MUX(CLK_MOUT_HDMI, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+ MUX(0, "mout_mipi1", mout_group2_p, SRC_DISP10, 16, 3),
+ MUX(0, "mout_dp1", mout_group2_p, SRC_DISP10, 20, 3),
+ MUX(0, "mout_pixel", mout_group2_p, SRC_DISP10, 24, 3),
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_DISP10, 28, 1),
+ MUX(0, "mout_fimd1_opt", mout_group2_p, SRC_DISP10, 8, 3),
+
+ MUX(0, "mout_fimd1_final", mout_fimd1_final_p, TOP_SPARE2, 8, 1),
/* MAU Block */
- MUX(0, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
+ MUX(CLK_MOUT_MAUDIO0, "mout_maudio0", mout_maudio0_p, SRC_MAU, 28, 3),
/* FSYS Block */
- MUX(0, "mout_usbd301", group2_p, SRC_FSYS, 4, 3),
- MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 8, 3),
- MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 12, 3),
- MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 16, 3),
- MUX(0, "mout_usbd300", group2_p, SRC_FSYS, 20, 3),
- MUX(0, "mout_unipro", group2_p, SRC_FSYS, 24, 3),
+ MUX(0, "mout_usbd301", mout_group2_p, SRC_FSYS, 4, 3),
+ MUX(0, "mout_mmc0", mout_group2_p, SRC_FSYS, 8, 3),
+ MUX(0, "mout_mmc1", mout_group2_p, SRC_FSYS, 12, 3),
+ MUX(0, "mout_mmc2", mout_group2_p, SRC_FSYS, 16, 3),
+ MUX(0, "mout_usbd300", mout_group2_p, SRC_FSYS, 20, 3),
+ MUX(0, "mout_unipro", mout_group2_p, SRC_FSYS, 24, 3),
+ MUX(0, "mout_mphy_refclk", mout_group2_p, SRC_FSYS, 28, 3),
/* PERIC Block */
- MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 4, 3),
- MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 8, 3),
- MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 12, 3),
- MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 16, 3),
- MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 3),
- MUX(0, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3),
- MUX(0, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3),
- MUX(0, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3),
- MUX(0, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3),
- MUX(0, "mout_spi0", group2_p, SRC_PERIC1, 20, 3),
- MUX(0, "mout_spi1", group2_p, SRC_PERIC1, 24, 3),
- MUX(0, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
+ MUX(0, "mout_uart0", mout_group2_p, SRC_PERIC0, 4, 3),
+ MUX(0, "mout_uart1", mout_group2_p, SRC_PERIC0, 8, 3),
+ MUX(0, "mout_uart2", mout_group2_p, SRC_PERIC0, 12, 3),
+ MUX(0, "mout_uart3", mout_group2_p, SRC_PERIC0, 16, 3),
+ MUX(0, "mout_pwm", mout_group2_p, SRC_PERIC0, 24, 3),
+ MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIC0, 28, 3),
+ MUX(0, "mout_audio0", mout_audio0_p, SRC_PERIC1, 8, 3),
+ MUX(0, "mout_audio1", mout_audio1_p, SRC_PERIC1, 12, 3),
+ MUX(0, "mout_audio2", mout_audio2_p, SRC_PERIC1, 16, 3),
+ MUX(0, "mout_spi0", mout_group2_p, SRC_PERIC1, 20, 3),
+ MUX(0, "mout_spi1", mout_group2_p, SRC_PERIC1, 24, 3),
+ MUX(0, "mout_spi2", mout_group2_p, SRC_PERIC1, 28, 3),
+
+ /* ISP Block */
+ MUX(0, "mout_pwm_isp", mout_group2_p, SRC_ISP, 24, 3),
+ MUX(0, "mout_uart_isp", mout_group2_p, SRC_ISP, 20, 3),
+ MUX(0, "mout_spi0_isp", mout_group2_p, SRC_ISP, 12, 3),
+ MUX(0, "mout_spi1_isp", mout_group2_p, SRC_ISP, 16, 3),
+ MUX(0, "mout_isp_sensor", mout_group2_p, SRC_ISP, 28, 3),
};
-static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5x_div_clks[] __initdata = {
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3),
- DIV(0, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3),
+ DIV(0, "div_kfc", "mout_kfc", DIV_KFC0, 0, 3),
DIV(0, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
+ DIV(0, "dout_aclk400_isp", "mout_aclk400_isp", DIV_TOP0, 0, 3),
DIV(0, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
DIV(0, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
DIV(0, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
+ DIV(0, "dout_aclk100_noc", "mout_aclk100_noc", DIV_TOP0, 20, 3),
DIV(0, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
DIV(0, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
DIV(0, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
DIV_TOP1, 0, 3),
+ DIV(0, "dout_aclk333_432_isp", "mout_aclk333_432_isp",
+ DIV_TOP1, 4, 3),
DIV(0, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
+ DIV(0, "dout_aclk333_432_isp0", "mout_aclk333_432_isp0",
+ DIV_TOP1, 16, 3),
DIV(0, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
DIV(0, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
DIV(0, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
@@ -458,15 +773,16 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(0, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
DIV(0, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
DIV(0, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
- DIV_A(0, "dout_aclk300_disp1", "mout_aclk300_disp1",
- DIV_TOP2, 24, 3, "aclk300_disp1"),
+ DIV(0, "dout_aclk300_disp1", "mout_aclk300_disp1", DIV_TOP2, 24, 3),
DIV(0, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
/* DISP1 Block */
- DIV(0, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
+ DIV(0, "dout_fimd1", "mout_fimd1_final", DIV_DISP10, 0, 4),
DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+ DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
+ DIV(0, "dout_aclk400_disp1", "mout_aclk400_disp1", DIV_TOP2, 4, 3),
/* Audio Block */
DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
@@ -484,6 +800,7 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(0, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10),
DIV(0, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8),
+ DIV(0, "dout_mphy_refclk", "mout_mphy_refclk", DIV_FSYS2, 16, 8),
/* UART and PWM */
DIV(0, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4),
@@ -497,6 +814,9 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
+ /* Mfc Block */
+ DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
+
/* PCM */
DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
DIV(0, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8),
@@ -509,15 +829,43 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(0, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4),
/* SPI Pre-Ratio */
- DIV(0, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8),
- DIV(0, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8),
- DIV(0, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
+ DIV(0, "dout_spi0_pre", "dout_spi0", DIV_PERIC4, 8, 8),
+ DIV(0, "dout_spi1_pre", "dout_spi1", DIV_PERIC4, 16, 8),
+ DIV(0, "dout_spi2_pre", "dout_spi2", DIV_PERIC4, 24, 8),
+
+ /* GSCL Block */
+ DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl",
+ DIV2_RATIO0, 4, 2),
+ DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
+
+ /* MSCL Block */
+ DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+
+ /* PSGEN */
+ DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
+ DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
+
+ /* ISP Block */
+ DIV(0, "dout_isp_sensor0", "mout_isp_sensor", SCLK_DIV_ISP0, 8, 8),
+ DIV(0, "dout_isp_sensor1", "mout_isp_sensor", SCLK_DIV_ISP0, 16, 8),
+ DIV(0, "dout_isp_sensor2", "mout_isp_sensor", SCLK_DIV_ISP0, 24, 8),
+ DIV(0, "dout_pwm_isp", "mout_pwm_isp", SCLK_DIV_ISP1, 28, 4),
+ DIV(0, "dout_uart_isp", "mout_uart_isp", SCLK_DIV_ISP1, 24, 4),
+ DIV(0, "dout_spi0_isp", "mout_spi0_isp", SCLK_DIV_ISP1, 16, 4),
+ DIV(0, "dout_spi1_isp", "mout_spi1_isp", SCLK_DIV_ISP1, 20, 4),
+ DIV_F(0, "dout_spi0_isp_pre", "dout_spi0_isp", SCLK_DIV_ISP1, 0, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(0, "dout_spi1_isp_pre", "dout_spi1_isp", SCLK_DIV_ISP1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
- /* TODO: Re-verify the CG bits for all the gate clocks */
- GATE_A(CLK_MCT, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0,
- "mct"),
+static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
+ /* G2D */
+ GATE(CLK_MDMA0, "mdma0", "aclk266_g2d", GATE_IP_G2D, 1, 0, 0),
+ GATE(CLK_SSS, "sss", "aclk266_g2d", GATE_IP_G2D, 2, 0, 0),
+ GATE(CLK_G2D, "g2d", "aclk333_g2d", GATE_IP_G2D, 3, 0, 0),
+ GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "aclk266_g2d", GATE_IP_G2D, 5, 0, 0),
+ GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
@@ -530,20 +878,42 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
+ GATE_BUS_TOP, 5, 0, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
- GATE(0, "pclk66_gpio", "mout_sw_aclk66",
+ GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
+ GATE_BUS_TOP, 8, 0, 0),
+ GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio",
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
- GATE(0, "aclk66_psgen", "mout_aclk66_psgen",
+ GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
- GATE(0, "aclk66_peric", "mout_aclk66_peric",
- GATE_BUS_TOP, 11, 0, 0),
+ GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
+ GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
+ GATE_BUS_TOP, 13, 0, 0),
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333", "mout_aclk333",
GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
+ GATE_BUS_TOP, 16, 0, 0),
+ GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
+ GATE_BUS_TOP, 17, 0, 0),
+ GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
+ GATE_BUS_TOP, 18, 0, 0),
+ GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
+ GATE_BUS_TOP, 28, 0, 0),
+ GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
+ GATE_BUS_TOP, 29, 0, 0),
+
+ GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
+ SRC_MASK_TOP2, 24, 0, 0),
+
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+ SRC_MASK_TOP7, 20, 0, 0),
/* sclk */
GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0",
@@ -554,11 +924,11 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE_TOP_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_uart3",
GATE_TOP_SCLK_PERIC, 3, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_pre_spi0",
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_spi0_pre",
GATE_TOP_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_pre_spi1",
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_spi1_pre",
GATE_TOP_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_pre_spi2",
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_spi2_pre",
GATE_TOP_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
GATE_TOP_SCLK_PERIC, 9, CLK_SET_RATE_PARENT, 0),
@@ -588,164 +958,191 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301",
GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_USBD301, "sclk_unipro", "dout_unipro",
- SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
-
- GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "aclK333_432_gscl",
- GATE_TOP_SCLK_GSCL, 6, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "aclk333_432_gscl",
- GATE_TOP_SCLK_GSCL, 7, CLK_SET_RATE_PARENT, 0),
-
/* Display */
GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "dout_fimd1",
- GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0),
+ GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "dout_mipi1",
- GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0),
+ GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
- GATE_TOP_SCLK_DISP1, 9, CLK_SET_RATE_PARENT, 0),
+ GATE_TOP_SCLK_DISP1, 9, 0, 0),
GATE(CLK_SCLK_PIXEL, "sclk_pixel", "dout_hdmi_pixel",
- GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0),
+ GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
- GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
+ GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
/* Maudio Block */
GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
- /* FSYS */
+
+ /* FSYS Block */
GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
GATE(CLK_PDMA1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0),
GATE(CLK_UFS, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0),
- GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0),
- GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0),
- GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0),
- GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0),
+ GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_IP_FSYS, 9, 0, 0),
+ GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_IP_FSYS, 14, 0, 0),
GATE(CLK_SROMC, "sromc", "aclk200_fsys2",
- GATE_BUS_FSYS0, 19, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0),
- GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0),
- GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0),
-
- /* UART */
- GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0),
- GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0),
- GATE_A(CLK_UART2, "uart2", "aclk66_peric",
- GATE_BUS_PERIC, 6, CLK_IGNORE_UNUSED, 0, "uart2"),
- GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0),
- /* I2C */
- GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0),
- GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0),
- GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0),
- GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0),
- GATE(CLK_I2C4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0),
- GATE(CLK_I2C5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0),
- GATE(CLK_I2C6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0),
- GATE(CLK_I2C7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0),
- GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0,
- 0),
- GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0),
- /* SPI */
- GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0),
- GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0),
- GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0),
- GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
- /* I2S */
- GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0),
- GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0),
- /* PCM */
- GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0),
- GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0),
- /* PWM */
- GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0),
- /* SPDIF */
- GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0),
+ GATE_IP_FSYS, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_IP_FSYS, 19, 0, 0),
+ GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_IP_FSYS, 20, 0, 0),
+ GATE(CLK_SCLK_UNIPRO, "sclk_unipro", "dout_unipro",
+ SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_I2C8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0),
- GATE(CLK_I2C9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0),
- GATE(CLK_I2C10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0),
+ /* PERIC Block */
+ GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0),
+ GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0),
+ GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0),
+ GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0),
+ GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0),
+ GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+
+ /* PERIS Block */
GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
- GATE_BUS_PERIS0, 12, CLK_IGNORE_UNUSED, 0),
+ GATE_IP_PERIS, 0, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SYSREG, "sysreg", "aclk66_psgen",
- GATE_BUS_PERIS0, 13, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0),
- GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0),
- GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0),
- GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0),
- GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0),
- GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0),
- GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0),
- GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0),
- GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0),
- GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0),
-
- GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0,
- 0),
+ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_IP_PERIS, 6, 0, 0),
+ GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_IP_PERIS, 7, 0, 0),
+ GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_IP_PERIS, 8, 0, 0),
+ GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_IP_PERIS, 9, 0, 0),
+ GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_IP_PERIS, 10, 0, 0),
+ GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_IP_PERIS, 11, 0, 0),
+ GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_IP_PERIS, 12, 0, 0),
+ GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_IP_PERIS, 13, 0, 0),
+ GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_IP_PERIS, 14, 0, 0),
+ GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_IP_PERIS, 15, 0, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_IP_PERIS, 16, 0, 0),
+ GATE(CLK_MCT, "mct", "aclk66_psgen", GATE_IP_PERIS, 18, 0, 0),
+ GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_IP_PERIS, 19, 0, 0),
+ GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_IP_PERIS, 20, 0, 0),
+ GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0),
+ GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0),
+
GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
- GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0),
- GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0),
- GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0),
- GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0),
+
+ /* GEN Block */
+ GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
+ GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
+ GATE(CLK_MDMA1, "mdma1", "mout_user_aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(CLK_TOP_RTC, "top_rtc", "aclk66_psgen", GATE_IP_GEN, 5, 0, 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "dout_gen_blk",
+ GATE_IP_GEN, 6, 0, 0),
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "dout_jpg_blk", GATE_IP_GEN, 7, 0, 0),
+ GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "dout_gen_blk",
+ GATE_IP_GEN, 9, 0, 0),
+
+ /* GATE_IP_GEN doesn't list gates for smmu_jpeg2 and mc */
+ GATE(CLK_SMMU_JPEG2, "smmu_jpeg2", "dout_jpg_blk",
+ GATE_BUS_GEN, 28, 0, 0),
+ GATE(CLK_MC, "mc", "aclk66_psgen", GATE_BUS_GEN, 12, 0, 0),
+
+ /* GSCL Block */
+ GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "mout_user_aclk333_432_gscl",
+ GATE_TOP_SCLK_GSCL, 6, 0, 0),
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "mout_user_aclk333_432_gscl",
+ GATE_TOP_SCLK_GSCL, 7, 0, 0),
GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
- GATE(CLK_CLK_3AA, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
-
- GATE(CLK_SMMU_3AA, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0,
- 0),
- GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "aclk333_432_gscl",
+ GATE(CLK_FIMC_3AA, "fimc_3aa", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 4, 0, 0),
+ GATE(CLK_FIMC_LITE0, "fimc_lite0", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 5, 0, 0),
+ GATE(CLK_FIMC_LITE1, "fimc_lite1", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 6, 0, 0),
+
+ GATE(CLK_SMMU_3AA, "smmu_3aa", "dout_gscl_blk_333",
+ GATE_IP_GSCL1, 2, 0, 0),
+ GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "dout_gscl_blk_333",
GATE_IP_GSCL1, 3, 0, 0),
- GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "aclk333_432_gscl",
+ GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0,
- 0),
- GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0,
- 0),
- GATE(CLK_GSCL_WA, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
- GATE(CLK_GSCL_WB, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
- GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "aclk333_432_gscl",
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 7, 0, 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
+ GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "dout_gscl_blk_333",
GATE_IP_GSCL1, 16, 0, 0),
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
+ /* MSCL Block */
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+ GATE_IP_MSCL, 8, 0, 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+ GATE_IP_MSCL, 9, 0, 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+ GATE_IP_MSCL, 10, 0, 0),
+
GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0,
- 0),
+ GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
+ GATE_IP_DISP1, 7, 0, 0),
+ GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
+ GATE_IP_DISP1, 8, 0, 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
+ GATE_IP_DISP1, 9, 0, 0),
+
+ /* ISP */
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
+ GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "dout_spi0_isp_pre",
+ GATE_TOP_SCLK_ISP, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "dout_spi1_isp_pre",
+ GATE_TOP_SCLK_ISP, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "dout_pwm_isp",
+ GATE_TOP_SCLK_ISP, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_ISP_SENSOR0, "sclk_isp_sensor0", "dout_isp_sensor0",
+ GATE_TOP_SCLK_ISP, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_ISP_SENSOR1, "sclk_isp_sensor1", "dout_isp_sensor1",
+ GATE_TOP_SCLK_ISP, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
+ GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
-
- GATE(CLK_G3D, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "dout_mfc_blk", GATE_IP_MFC, 1, 0, 0),
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "dout_mfc_blk", GATE_IP_MFC, 2, 0, 0),
- GATE(CLK_ROTATOR, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
- GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
- GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
- GATE(CLK_MDMA1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
- GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
- GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0),
- GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
-
- GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
- GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
- GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
- GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0,
- 0),
- GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0,
- 0),
- GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0,
- 0),
- GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0,
- 0),
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
};
-static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
+static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
[apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
[cpll] = PLL(pll_2550, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
@@ -776,8 +1173,11 @@ static struct of_device_id ext_clk_match[] __initdata = {
};
/* register exynos5420 clocks */
-static void __init exynos5420_clk_init(struct device_node *np)
+static void __init exynos5x_clk_init(struct device_node *np,
+ enum exynos5x_soc soc)
{
+ struct samsung_clk_provider *ctx;
+
if (np) {
reg_base = of_iomap(np, 0);
if (!reg_base)
@@ -786,23 +1186,56 @@ static void __init exynos5420_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
- ARRAY_SIZE(exynos5420_fixed_rate_ext_clks),
+ exynos5x_soc = soc;
+
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
+ ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
ext_clk_match);
- samsung_clk_register_pll(exynos5420_plls, ARRAY_SIZE(exynos5420_plls),
+ samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls),
reg_base);
- samsung_clk_register_fixed_rate(exynos5420_fixed_rate_clks,
- ARRAY_SIZE(exynos5420_fixed_rate_clks));
- samsung_clk_register_fixed_factor(exynos5420_fixed_factor_clks,
- ARRAY_SIZE(exynos5420_fixed_factor_clks));
- samsung_clk_register_mux(exynos5420_mux_clks,
- ARRAY_SIZE(exynos5420_mux_clks));
- samsung_clk_register_div(exynos5420_div_clks,
- ARRAY_SIZE(exynos5420_div_clks));
- samsung_clk_register_gate(exynos5420_gate_clks,
- ARRAY_SIZE(exynos5420_gate_clks));
+ samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks,
+ ARRAY_SIZE(exynos5x_fixed_rate_clks));
+ samsung_clk_register_fixed_factor(ctx, exynos5x_fixed_factor_clks,
+ ARRAY_SIZE(exynos5x_fixed_factor_clks));
+ samsung_clk_register_mux(ctx, exynos5x_mux_clks,
+ ARRAY_SIZE(exynos5x_mux_clks));
+ samsung_clk_register_div(ctx, exynos5x_div_clks,
+ ARRAY_SIZE(exynos5x_div_clks));
+ samsung_clk_register_gate(ctx, exynos5x_gate_clks,
+ ARRAY_SIZE(exynos5x_gate_clks));
+
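+ /* on top of the common exynos5x clocks, register the SoC-specific ones */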
+ if (soc == EXYNOS5420) {
+ samsung_clk_register_mux(ctx, exynos5420_mux_clks,
+ ARRAY_SIZE(exynos5420_mux_clks));
+ samsung_clk_register_div(ctx, exynos5420_div_clks,
+ ARRAY_SIZE(exynos5420_div_clks));
+ } else {
+ samsung_clk_register_fixed_factor(
+ ctx, exynos5800_fixed_factor_clks,
+ ARRAY_SIZE(exynos5800_fixed_factor_clks));
+ samsung_clk_register_mux(ctx, exynos5800_mux_clks,
+ ARRAY_SIZE(exynos5800_mux_clks));
+ samsung_clk_register_div(ctx, exynos5800_div_clks,
+ ARRAY_SIZE(exynos5800_div_clks));
+ samsung_clk_register_gate(ctx, exynos5800_gate_clks,
+ ARRAY_SIZE(exynos5800_gate_clks));
+ }
exynos5420_clk_sleep_init();
}
+
+static void __init exynos5420_clk_init(struct device_node *np)
+{
+ exynos5x_clk_init(np, EXYNOS5420);
+}
CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init);
+
+static void __init exynos5800_clk_init(struct device_node *np)
+{
+ exynos5x_clk_init(np, EXYNOS5800);
+}
+CLK_OF_DECLARE(exynos5800_clk, "samsung,exynos5800-clock", exynos5800_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 2bfad5a993d0..647f1440aa6a 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -93,6 +93,7 @@ static struct of_device_id ext_clk_match[] __initdata = {
static void __init exynos5440_clk_init(struct device_node *np)
{
void __iomem *reg_base;
+ struct samsung_clk_provider *ctx;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -101,22 +102,25 @@ static void __init exynos5440_clk_init(struct device_node *np)
return;
}
- samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks,
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
samsung_clk_register_pll2550x("cplla", "xtal", reg_base + 0x1c, 0x10);
samsung_clk_register_pll2550x("cpllb", "xtal", reg_base + 0x20, 0x10);
- samsung_clk_register_fixed_rate(exynos5440_fixed_rate_clks,
+ samsung_clk_register_fixed_rate(ctx, exynos5440_fixed_rate_clks,
ARRAY_SIZE(exynos5440_fixed_rate_clks));
- samsung_clk_register_fixed_factor(exynos5440_fixed_factor_clks,
+ samsung_clk_register_fixed_factor(ctx, exynos5440_fixed_factor_clks,
ARRAY_SIZE(exynos5440_fixed_factor_clks));
- samsung_clk_register_mux(exynos5440_mux_clks,
+ samsung_clk_register_mux(ctx, exynos5440_mux_clks,
ARRAY_SIZE(exynos5440_mux_clks));
- samsung_clk_register_div(exynos5440_div_clks,
+ samsung_clk_register_div(ctx, exynos5440_div_clks,
ARRAY_SIZE(exynos5440_div_clks));
- samsung_clk_register_gate(exynos5440_gate_clks,
+ samsung_clk_register_gate(ctx, exynos5440_gate_clks,
ARRAY_SIZE(exynos5440_gate_clks));
pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 81e6d2f49aa0..b07fad2a9167 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/hrtimer.h>
+#include <linux/delay.h>
#include "clk.h"
#include "clk-pll.h"
@@ -59,6 +60,72 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
}
/*
+ * PLL2126 Clock Type
+ */
+
+#define PLL2126_MDIV_MASK (0xff)
+#define PLL2126_PDIV_MASK (0x3f)
+#define PLL2126_SDIV_MASK (0x3)
+#define PLL2126_MDIV_SHIFT (16)
+#define PLL2126_PDIV_SHIFT (8)
+#define PLL2126_SDIV_SHIFT (0)
+
+static unsigned long samsung_pll2126_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con, mdiv, pdiv, sdiv;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL2126_MDIV_SHIFT) & PLL2126_MDIV_MASK;
+ pdiv = (pll_con >> PLL2126_PDIV_SHIFT) & PLL2126_PDIV_MASK;
+ sdiv = (pll_con >> PLL2126_SDIV_SHIFT) & PLL2126_SDIV_MASK;
+
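+ /* Fout = Fin * (MDIV + 8) / ((PDIV + 2) * 2^SDIV) */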
+ fvco *= (mdiv + 8);
+ do_div(fvco, (pdiv + 2) << sdiv);
+
+ return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll2126_clk_ops = {
+ .recalc_rate = samsung_pll2126_recalc_rate,
+};
+
+/*
+ * PLL3000 Clock Type
+ */
+
+#define PLL3000_MDIV_MASK (0xff)
+#define PLL3000_PDIV_MASK (0x3)
+#define PLL3000_SDIV_MASK (0x3)
+#define PLL3000_MDIV_SHIFT (16)
+#define PLL3000_PDIV_SHIFT (8)
+#define PLL3000_SDIV_SHIFT (0)
+
+static unsigned long samsung_pll3000_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con, mdiv, pdiv, sdiv;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL3000_MDIV_SHIFT) & PLL3000_MDIV_MASK;
+ pdiv = (pll_con >> PLL3000_PDIV_SHIFT) & PLL3000_PDIV_MASK;
+ sdiv = (pll_con >> PLL3000_SDIV_SHIFT) & PLL3000_SDIV_MASK;
+
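+ /* Fout = 2 * Fin * (MDIV + 8) / (PDIV * 2^SDIV) */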
+ fvco *= (2 * (mdiv + 8));
+ do_div(fvco, pdiv << sdiv);
+
+ return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll3000_clk_ops = {
+ .recalc_rate = samsung_pll3000_recalc_rate,
+};
+
+/*
* PLL35xx Clock Type
*/
/* Maximum lock time can be 270 * PDIV cycles */
@@ -564,7 +631,9 @@ static const struct clk_ops samsung_pll46xx_clk_min_ops = {
#define PLL6552_PDIV_MASK 0x3f
#define PLL6552_SDIV_MASK 0x7
#define PLL6552_MDIV_SHIFT 16
+#define PLL6552_MDIV_SHIFT_2416 14
#define PLL6552_PDIV_SHIFT 8
+#define PLL6552_PDIV_SHIFT_2416 5
#define PLL6552_SDIV_SHIFT 0
static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
@@ -575,8 +644,13 @@ static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con = __raw_readl(pll->con_reg);
- mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
- pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
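+ /* the S3C2416 variant packs MDIV and PDIV at different bit offsets */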
+ if (pll->type == pll_6552_s3c2416) {
+ mdiv = (pll_con >> PLL6552_MDIV_SHIFT_2416) & PLL6552_MDIV_MASK;
+ pdiv = (pll_con >> PLL6552_PDIV_SHIFT_2416) & PLL6552_PDIV_MASK;
+ } else {
+ mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
+ pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
+ }
sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK;
fvco *= mdiv;
@@ -628,6 +702,169 @@ static const struct clk_ops samsung_pll6553_clk_ops = {
};
/*
+ * PLL Clock Type of S3C24XX before S3C2443
+ */
+
+#define PLLS3C2410_MDIV_MASK (0xff)
+#define PLLS3C2410_PDIV_MASK (0x1f)
+#define PLLS3C2410_SDIV_MASK (0x3)
+#define PLLS3C2410_MDIV_SHIFT (12)
+#define PLLS3C2410_PDIV_SHIFT (4)
+#define PLLS3C2410_SDIV_SHIFT (0)
+
+#define PLLS3C2410_ENABLE_REG_OFFSET 0x10
+
+static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con, mdiv, pdiv, sdiv;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
+ pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
+ sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
+
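+ /* Fout = Fin * (MDIV + 8) / ((PDIV + 2) * 2^SDIV) */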
+ fvco *= (mdiv + 8);
+ do_div(fvco, (pdiv + 2) << sdiv);
+
+ return (unsigned long)fvco;
+}
+
+static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con, mdiv, pdiv, sdiv;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
+ pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
+ sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
+
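+ /* Fout = 2 * Fin * (MDIV + 8) / ((PDIV + 2) * 2^SDIV) */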
+ fvco *= (2 * (mdiv + 8));
+ do_div(fvco, (pdiv + 2) << sdiv);
+
+ return (unsigned long)fvco;
+}
+
+static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 tmp;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ tmp = __raw_readl(pll->con_reg);
+
+ /* Change PLL PMS values */
+ tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) |
+ (PLLS3C2410_PDIV_MASK << PLLS3C2410_PDIV_SHIFT) |
+ (PLLS3C2410_SDIV_MASK << PLLS3C2410_SDIV_SHIFT));
+ tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) |
+ (rate->pdiv << PLLS3C2410_PDIV_SHIFT) |
+ (rate->sdiv << PLLS3C2410_SDIV_SHIFT);
+ __raw_writel(tmp, pll->con_reg);
+
+ /* Time to settle according to the manual */
+ udelay(300);
+
+ return 0;
+}
+
+static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_en = __raw_readl(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
+ u32 pll_en_orig = pll_en;
+
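+ /* the register bits power the PLLs down, so enabling means clearing them */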
+ if (enable)
+ pll_en &= ~BIT(bit);
+ else
+ pll_en |= BIT(bit);
+
+ __raw_writel(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
+
+ /* if we started the UPLL, then allow to settle */
+ if (enable && (pll_en_orig & BIT(bit)))
+ udelay(300);
+
+ return 0;
+}
+
+static int samsung_s3c2410_mpll_enable(struct clk_hw *hw)
+{
+ return samsung_s3c2410_pll_enable(hw, 5, true);
+}
+
+static void samsung_s3c2410_mpll_disable(struct clk_hw *hw)
+{
+ samsung_s3c2410_pll_enable(hw, 5, false);
+}
+
+static int samsung_s3c2410_upll_enable(struct clk_hw *hw)
+{
+ return samsung_s3c2410_pll_enable(hw, 7, true);
+}
+
+static void samsung_s3c2410_upll_disable(struct clk_hw *hw)
+{
+ samsung_s3c2410_pll_enable(hw, 7, false);
+}
+
+static const struct clk_ops samsung_s3c2410_mpll_clk_min_ops = {
+ .recalc_rate = samsung_s3c2410_pll_recalc_rate,
+ .enable = samsung_s3c2410_mpll_enable,
+ .disable = samsung_s3c2410_mpll_disable,
+};
+
+static const struct clk_ops samsung_s3c2410_upll_clk_min_ops = {
+ .recalc_rate = samsung_s3c2410_pll_recalc_rate,
+ .enable = samsung_s3c2410_upll_enable,
+ .disable = samsung_s3c2410_upll_disable,
+};
+
+static const struct clk_ops samsung_s3c2440_mpll_clk_min_ops = {
+ .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
+ .enable = samsung_s3c2410_mpll_enable,
+ .disable = samsung_s3c2410_mpll_disable,
+};
+
+static const struct clk_ops samsung_s3c2410_mpll_clk_ops = {
+ .recalc_rate = samsung_s3c2410_pll_recalc_rate,
+ .enable = samsung_s3c2410_mpll_enable,
+ .disable = samsung_s3c2410_mpll_disable,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_s3c2410_pll_set_rate,
+};
+
+static const struct clk_ops samsung_s3c2410_upll_clk_ops = {
+ .recalc_rate = samsung_s3c2410_pll_recalc_rate,
+ .enable = samsung_s3c2410_upll_enable,
+ .disable = samsung_s3c2410_upll_disable,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_s3c2410_pll_set_rate,
+};
+
+static const struct clk_ops samsung_s3c2440_mpll_clk_ops = {
+ .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
+ .enable = samsung_s3c2410_mpll_enable,
+ .disable = samsung_s3c2410_mpll_disable,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_s3c2410_pll_set_rate,
+};
+
+/*
* PLL2550x Clock Type
*/
@@ -710,8 +947,206 @@ struct clk * __init samsung_clk_register_pll2550x(const char *name,
return clk;
}
-static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
- void __iomem *base)
+/*
+ * PLL2550xx Clock Type
+ */
+
+/* Maximum lock time can be 270 * PDIV cycles */
+#define PLL2550XX_LOCK_FACTOR 270
+
+#define PLL2550XX_M_MASK 0x3FF
+#define PLL2550XX_P_MASK 0x3F
+#define PLL2550XX_S_MASK 0x7
+#define PLL2550XX_LOCK_STAT_MASK 0x1
+#define PLL2550XX_M_SHIFT 9
+#define PLL2550XX_P_SHIFT 3
+#define PLL2550XX_S_SHIFT 0
+#define PLL2550XX_LOCK_STAT_SHIFT 21
+
+static unsigned long samsung_pll2550xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK;
+ pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK;
+ sdiv = (pll_con >> PLL2550XX_S_SHIFT) & PLL2550XX_S_MASK;
+
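+ /* Fout = Fin * MDIV / (PDIV * 2^SDIV) */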
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
+
+static inline bool samsung_pll2550xx_mp_change(u32 mdiv, u32 pdiv, u32 pll_con)
+{
+ u32 old_mdiv, old_pdiv;
+
+ old_mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK;
+ old_pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK;
+
+ return mdiv != old_mdiv || pdiv != old_pdiv;
+}
+
+static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 tmp;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ tmp = __raw_readl(pll->con_reg);
+
+ if (!(samsung_pll2550xx_mp_change(rate->mdiv, rate->pdiv, tmp))) {
+ /* if only the S divider changes, update S alone */
+ tmp &= ~(PLL2550XX_S_MASK << PLL2550XX_S_SHIFT);
+ tmp |= rate->sdiv << PLL2550XX_S_SHIFT;
+ __raw_writel(tmp, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ __raw_writel(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg);
+
+ /* Change PLL PMS values */
+ tmp &= ~((PLL2550XX_M_MASK << PLL2550XX_M_SHIFT) |
+ (PLL2550XX_P_MASK << PLL2550XX_P_SHIFT) |
+ (PLL2550XX_S_MASK << PLL2550XX_S_SHIFT));
+ tmp |= (rate->mdiv << PLL2550XX_M_SHIFT) |
+ (rate->pdiv << PLL2550XX_P_SHIFT) |
+ (rate->sdiv << PLL2550XX_S_SHIFT);
+ __raw_writel(tmp, pll->con_reg);
+
+ /* wait for the PLL to lock */
+ do {
+ cpu_relax();
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (PLL2550XX_LOCK_STAT_MASK
+ << PLL2550XX_LOCK_STAT_SHIFT)));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll2550xx_clk_ops = {
+ .recalc_rate = samsung_pll2550xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll2550xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll2550xx_clk_min_ops = {
+ .recalc_rate = samsung_pll2550xx_recalc_rate,
+};
+
+/*
+ * PLL2650XX Clock Type
+ */
+
+/* Maximum lock time can be 3000 * PDIV cycles */
+#define PLL2650XX_LOCK_FACTOR 3000
+
+#define PLL2650XX_MDIV_SHIFT 9
+#define PLL2650XX_PDIV_SHIFT 3
+#define PLL2650XX_SDIV_SHIFT 0
+#define PLL2650XX_KDIV_SHIFT 0
+#define PLL2650XX_MDIV_MASK 0x1ff
+#define PLL2650XX_PDIV_MASK 0x3f
+#define PLL2650XX_SDIV_MASK 0x7
+#define PLL2650XX_KDIV_MASK 0xffff
+#define PLL2650XX_PLL_ENABLE_SHIFT 23
+#define PLL2650XX_PLL_LOCKTIME_SHIFT 21
+#define PLL2650XX_PLL_FOUTMASK_SHIFT 31
+
+static unsigned long samsung_pll2650xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con0, pll_con2;
+ s16 kdiv;
+ u64 fvco = parent_rate;
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con2 = __raw_readl(pll->con_reg + 8);
+ mdiv = (pll_con0 >> PLL2650XX_MDIV_SHIFT) & PLL2650XX_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL2650XX_PDIV_SHIFT) & PLL2650XX_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL2650XX_SDIV_SHIFT) & PLL2650XX_SDIV_MASK;
+ kdiv = (s16)(pll_con2 & PLL2650XX_KDIV_MASK);
+
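+ /* fractional PLL: Fout = Fin * (MDIV + KDIV/65536) / (PDIV * 2^SDIV) */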
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
+}
+
+static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 tmp, pll_con0, pll_con2;
+ const struct samsung_pll_rate_table *rate;
+
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con2 = __raw_readl(pll->con_reg + 8);
+
+ /* Change PLL PMS values */
+ pll_con0 &= ~(PLL2650XX_MDIV_MASK << PLL2650XX_MDIV_SHIFT |
+ PLL2650XX_PDIV_MASK << PLL2650XX_PDIV_SHIFT |
+ PLL2650XX_SDIV_MASK << PLL2650XX_SDIV_SHIFT);
+ pll_con0 |= rate->mdiv << PLL2650XX_MDIV_SHIFT;
+ pll_con0 |= rate->pdiv << PLL2650XX_PDIV_SHIFT;
+ pll_con0 |= rate->sdiv << PLL2650XX_SDIV_SHIFT;
+ pll_con0 |= 1 << PLL2650XX_PLL_ENABLE_SHIFT;
+ pll_con0 |= 1 << PLL2650XX_PLL_FOUTMASK_SHIFT;
+
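+ /* the K field is programmed as the two's complement of the table value */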
+ pll_con2 &= ~(PLL2650XX_KDIV_MASK << PLL2650XX_KDIV_SHIFT);
+ pll_con2 |= ((~(rate->kdiv) + 1) & PLL2650XX_KDIV_MASK)
+ << PLL2650XX_KDIV_SHIFT;
+
+ /* Set PLL lock time. */
+ __raw_writel(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg);
+
+ __raw_writel(pll_con0, pll->con_reg);
+ __raw_writel(pll_con2, pll->con_reg + 8);
+
+ do {
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT)));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll2650xx_clk_ops = {
+ .recalc_rate = samsung_pll2650xx_recalc_rate,
+ .set_rate = samsung_pll2650xx_set_rate,
+ .round_rate = samsung_pll_round_rate,
+};
+
+static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
+ .recalc_rate = samsung_pll2650xx_recalc_rate,
+};
+
+static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ struct samsung_pll_clock *pll_clk,
+ void __iomem *base)
{
struct samsung_clk_pll *pll;
struct clk *clk;
@@ -746,6 +1181,12 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
}
switch (pll_clk->type) {
+ case pll_2126:
+ init.ops = &samsung_pll2126_clk_ops;
+ break;
+ case pll_3000:
+ init.ops = &samsung_pll3000_clk_ops;
+ break;
/* clk_ops for 35xx and 2550 are similar */
case pll_35xx:
case pll_2550:
@@ -773,6 +1214,7 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
init.ops = &samsung_pll36xx_clk_ops;
break;
case pll_6552:
+ case pll_6552_s3c2416:
init.ops = &samsung_pll6552_clk_ops;
break;
case pll_6553:
@@ -786,6 +1228,36 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
else
init.ops = &samsung_pll46xx_clk_ops;
break;
+ case pll_s3c2410_mpll:
+ if (!pll->rate_table)
+ init.ops = &samsung_s3c2410_mpll_clk_min_ops;
+ else
+ init.ops = &samsung_s3c2410_mpll_clk_ops;
+ break;
+ case pll_s3c2410_upll:
+ if (!pll->rate_table)
+ init.ops = &samsung_s3c2410_upll_clk_min_ops;
+ else
+ init.ops = &samsung_s3c2410_upll_clk_ops;
+ break;
+ case pll_s3c2440_mpll:
+ if (!pll->rate_table)
+ init.ops = &samsung_s3c2440_mpll_clk_min_ops;
+ else
+ init.ops = &samsung_s3c2440_mpll_clk_ops;
+ break;
+ case pll_2550xx:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll2550xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll2550xx_clk_ops;
+ break;
+ case pll_2650xx:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll2650xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll2650xx_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
@@ -804,7 +1276,7 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
return;
}
- samsung_clk_add_lookup(clk, pll_clk->id);
+ samsung_clk_add_lookup(ctx, clk, pll_clk->id);
if (!pll_clk->alias)
return;
@@ -815,11 +1287,12 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
__func__, pll_clk->name, ret);
}
-void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
- unsigned int nr_pll, void __iomem *base)
+void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ struct samsung_pll_clock *pll_list,
+ unsigned int nr_pll, void __iomem *base)
{
int cnt;
for (cnt = 0; cnt < nr_pll; cnt++)
- _samsung_clk_register_pll(&pll_list[cnt], base);
+ _samsung_clk_register_pll(ctx, &pll_list[cnt], base);
}
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 6c39030080fb..c0ed4d41fd90 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -13,6 +13,8 @@
#define __SAMSUNG_CLK_PLL_H
enum samsung_pll_type {
+ pll_2126,
+ pll_3000,
pll_35xx,
pll_36xx,
pll_2550,
@@ -24,7 +26,13 @@ enum samsung_pll_type {
pll_4650,
pll_4650c,
pll_6552,
+ pll_6552_s3c2416,
pll_6553,
+ pll_s3c2410_mpll,
+ pll_s3c2410_upll,
+ pll_s3c2440_mpll,
+ pll_2550xx,
+ pll_2650xx,
};
#define PLL_35XX_RATE(_rate, _m, _p, _s) \
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
new file mode 100644
index 000000000000..0449cc0458ed
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for s3c24xx external clock output.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include "clk.h"
+
+/* legacy access to misccr, until dt conversion is finished */
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
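+/* indices of the generated clocks in the provider's onecell clk table */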
+#define MUX_DCLK0 0
+#define MUX_DCLK1 1
+#define DIV_DCLK0 2
+#define DIV_DCLK1 3
+#define GATE_DCLK0 4
+#define GATE_DCLK1 5
+#define MUX_CLKOUT0 6
+#define MUX_CLKOUT1 7
+#define DCLK_MAX_CLKS (MUX_CLKOUT1 + 1)
+
+enum supported_socs {
+ S3C2410,
+ S3C2412,
+ S3C2440,
+ S3C2443,
+};
+
+struct s3c24xx_dclk_drv_data {
+ const char **clkout0_parent_names;
+ int clkout0_num_parents;
+ const char **clkout1_parent_names;
+ int clkout1_num_parents;
+ const char **mux_parent_names;
+ int mux_num_parents;
+};
+
+/*
+ * Clock for output-parent selection in misccr
+ */
+
+struct s3c24xx_clkout {
+ struct clk_hw hw;
+ u32 mask;
+ u8 shift;
+};
+
+#define to_s3c24xx_clkout(_hw) container_of(_hw, struct s3c24xx_clkout, hw)
+
+static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
+{
+ struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 val;
+
+	val = readl_relaxed(S3C24XX_MISCCR) >> clkout->shift;
+	val &= clkout->mask;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
+
+	s3c2410_modify_misccr((clkout->mask << clkout->shift),
+			      (index << clkout->shift));
+
+	return 0;
+}
+
+static const struct clk_ops s3c24xx_clkout_ops = {
+ .get_parent = s3c24xx_clkout_get_parent,
+ .set_parent = s3c24xx_clkout_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents,
+ u8 shift, u32 mask)
+{
+ struct s3c24xx_clkout *clkout;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the clkout */
+ clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
+ if (!clkout)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &s3c24xx_clkout_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clkout->shift = shift;
+ clkout->mask = mask;
+ clkout->hw.init = &init;
+
+	clk = clk_register(dev, &clkout->hw);
+	if (IS_ERR(clk))
+		kfree(clkout);
+
+	return clk;
+}
+
+/*
+ * dclk and clkout init
+ */
+
+struct s3c24xx_dclk {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_onecell_data clk_data;
+ struct notifier_block dclk0_div_change_nb;
+ struct notifier_block dclk1_div_change_nb;
+ spinlock_t dclk_lock;
+ unsigned long reg_save;
+};
+
+#define to_s3c24xx_dclk0(x) \
+ container_of(x, struct s3c24xx_dclk, dclk0_div_change_nb)
+
+#define to_s3c24xx_dclk1(x) \
+ container_of(x, struct s3c24xx_dclk, dclk1_div_change_nb)
+
+static const char *dclk_s3c2410_p[] = { "pclk", "uclk" };
+static const char *clkout0_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
+ "gate_dclk0" };
+static const char *clkout1_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
+ "gate_dclk1" };
+
+static const char *clkout0_s3c2412_p[] = { "mpll", "upll", "rtc_clkout",
+ "hclk", "pclk", "gate_dclk0" };
+static const char *clkout1_s3c2412_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
+ "gate_dclk1" };
+
+static const char *clkout0_s3c2440_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
+ "gate_dclk0" };
+static const char *clkout1_s3c2440_p[] = { "mpll", "upll", "rtc_clkout",
+ "hclk", "pclk", "gate_dclk1" };
+
+static const char *dclk_s3c2443_p[] = { "pclk", "epll" };
+static const char *clkout0_s3c2443_p[] = { "xti", "epll", "armclk", "hclk", "pclk",
+ "gate_dclk0" };
+static const char *clkout1_s3c2443_p[] = { "dummy", "epll", "rtc_clkout",
+ "hclk", "pclk", "gate_dclk1" };
+
+#define DCLKCON_DCLK_DIV_MASK 0xf
+#define DCLKCON_DCLK0_DIV_SHIFT 4
+#define DCLKCON_DCLK0_CMP_SHIFT 8
+#define DCLKCON_DCLK1_DIV_SHIFT 20
+#define DCLKCON_DCLK1_CMP_SHIFT 24
+
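+/*
+ * Each dclk has a 4-bit divider and a 4-bit compare field in DCLKCON;
+ * the compare value sets the low portion of the output period, so it is
+ * kept at roughly half the divider below to retain a ~50% duty cycle.
+ */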
+static void s3c24xx_dclk_update_cmp(struct s3c24xx_dclk *s3c24xx_dclk,
+ int div_shift, int cmp_shift)
+{
+ unsigned long flags = 0;
+ u32 dclk_con, div, cmp;
+
+ spin_lock_irqsave(&s3c24xx_dclk->dclk_lock, flags);
+
+ dclk_con = readl_relaxed(s3c24xx_dclk->base);
+
+ div = ((dclk_con >> div_shift) & DCLKCON_DCLK_DIV_MASK) + 1;
+ cmp = ((div + 1) / 2) - 1;
+
+ dclk_con &= ~(DCLKCON_DCLK_DIV_MASK << cmp_shift);
+ dclk_con |= (cmp << cmp_shift);
+
+ writel_relaxed(dclk_con, s3c24xx_dclk->base);
+
+ spin_unlock_irqrestore(&s3c24xx_dclk->dclk_lock, flags);
+}
+
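+/*
+ * The generic divider clocks used for div_dclk0/1 know nothing about
+ * the compare field, so rate-change notifiers resynchronize it after
+ * every successful divider update.
+ */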
+static int s3c24xx_dclk0_div_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk0(nb);
+
+ if (event == POST_RATE_CHANGE) {
+ s3c24xx_dclk_update_cmp(s3c24xx_dclk,
+ DCLKCON_DCLK0_DIV_SHIFT, DCLKCON_DCLK0_CMP_SHIFT);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int s3c24xx_dclk1_div_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk1(nb);
+
+ if (event == POST_RATE_CHANGE) {
+ s3c24xx_dclk_update_cmp(s3c24xx_dclk,
+ DCLKCON_DCLK1_DIV_SHIFT, DCLKCON_DCLK1_CMP_SHIFT);
+ }
+
+ return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s3c24xx_dclk_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
+
+ s3c24xx_dclk->reg_save = readl_relaxed(s3c24xx_dclk->base);
+ return 0;
+}
+
+static int s3c24xx_dclk_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
+
+ writel_relaxed(s3c24xx_dclk->reg_save, s3c24xx_dclk->base);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops,
+ s3c24xx_dclk_suspend, s3c24xx_dclk_resume);
+
+static int s3c24xx_dclk_probe(struct platform_device *pdev)
+{
+ struct s3c24xx_dclk *s3c24xx_dclk;
+ struct resource *mem;
+ struct clk **clk_table;
+ struct s3c24xx_dclk_drv_data *dclk_variant;
+ int ret, i;
+
+ s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk),
+ GFP_KERNEL);
+ if (!s3c24xx_dclk)
+ return -ENOMEM;
+
+ s3c24xx_dclk->dev = &pdev->dev;
+ platform_set_drvdata(pdev, s3c24xx_dclk);
+ spin_lock_init(&s3c24xx_dclk->dclk_lock);
+
+ clk_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * DCLK_MAX_CLKS,
+ GFP_KERNEL);
+ if (!clk_table)
+ return -ENOMEM;
+
+ s3c24xx_dclk->clk_data.clks = clk_table;
+ s3c24xx_dclk->clk_data.clk_num = DCLK_MAX_CLKS;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ s3c24xx_dclk->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(s3c24xx_dclk->base))
+ return PTR_ERR(s3c24xx_dclk->base);
+
+ dclk_variant = (struct s3c24xx_dclk_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+
+ clk_table[MUX_DCLK0] = clk_register_mux(&pdev->dev, "mux_dclk0",
+ dclk_variant->mux_parent_names,
+ dclk_variant->mux_num_parents, 0,
+ s3c24xx_dclk->base, 1, 1, 0,
+ &s3c24xx_dclk->dclk_lock);
+ clk_table[MUX_DCLK1] = clk_register_mux(&pdev->dev, "mux_dclk1",
+ dclk_variant->mux_parent_names,
+ dclk_variant->mux_num_parents, 0,
+ s3c24xx_dclk->base, 17, 1, 0,
+ &s3c24xx_dclk->dclk_lock);
+
+ clk_table[DIV_DCLK0] = clk_register_divider(&pdev->dev, "div_dclk0",
+ "mux_dclk0", 0, s3c24xx_dclk->base,
+ 4, 4, 0, &s3c24xx_dclk->dclk_lock);
+ clk_table[DIV_DCLK1] = clk_register_divider(&pdev->dev, "div_dclk1",
+ "mux_dclk1", 0, s3c24xx_dclk->base,
+ 20, 4, 0, &s3c24xx_dclk->dclk_lock);
+
+ clk_table[GATE_DCLK0] = clk_register_gate(&pdev->dev, "gate_dclk0",
+ "div_dclk0", CLK_SET_RATE_PARENT,
+ s3c24xx_dclk->base, 0, 0,
+ &s3c24xx_dclk->dclk_lock);
+ clk_table[GATE_DCLK1] = clk_register_gate(&pdev->dev, "gate_dclk1",
+ "div_dclk1", CLK_SET_RATE_PARENT,
+ s3c24xx_dclk->base, 16, 0,
+ &s3c24xx_dclk->dclk_lock);
+
+ clk_table[MUX_CLKOUT0] = s3c24xx_register_clkout(&pdev->dev,
+ "clkout0", dclk_variant->clkout0_parent_names,
+ dclk_variant->clkout0_num_parents, 4, 7);
+ clk_table[MUX_CLKOUT1] = s3c24xx_register_clkout(&pdev->dev,
+ "clkout1", dclk_variant->clkout1_parent_names,
+ dclk_variant->clkout1_num_parents, 8, 7);
+
+ for (i = 0; i < DCLK_MAX_CLKS; i++)
+ if (IS_ERR(clk_table[i])) {
+ dev_err(&pdev->dev, "clock %d failed to register\n", i);
+ ret = PTR_ERR(clk_table[i]);
+ goto err_clk_register;
+ }
+
+ ret = clk_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL);
+ if (!ret)
+ ret = clk_register_clkdev(clk_table[MUX_DCLK1], "dclk1", NULL);
+ if (!ret)
+ ret = clk_register_clkdev(clk_table[MUX_CLKOUT0],
+ "clkout0", NULL);
+ if (!ret)
+ ret = clk_register_clkdev(clk_table[MUX_CLKOUT1],
+ "clkout1", NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register aliases, %d\n", ret);
+ goto err_clk_register;
+ }
+
+ s3c24xx_dclk->dclk0_div_change_nb.notifier_call =
+ s3c24xx_dclk0_div_notify;
+
+ s3c24xx_dclk->dclk1_div_change_nb.notifier_call =
+ s3c24xx_dclk1_div_notify;
+
+ ret = clk_notifier_register(clk_table[DIV_DCLK0],
+ &s3c24xx_dclk->dclk0_div_change_nb);
+ if (ret)
+ goto err_clk_register;
+
+ ret = clk_notifier_register(clk_table[DIV_DCLK1],
+ &s3c24xx_dclk->dclk1_div_change_nb);
+ if (ret)
+ goto err_dclk_notify;
+
+ return 0;
+
+err_dclk_notify:
+ clk_notifier_unregister(clk_table[DIV_DCLK0],
+ &s3c24xx_dclk->dclk0_div_change_nb);
+err_clk_register:
+ for (i = 0; i < DCLK_MAX_CLKS; i++)
+ if (clk_table[i] && !IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+
+ return ret;
+}
+
+static int s3c24xx_dclk_remove(struct platform_device *pdev)
+{
+ struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
+ struct clk **clk_table = s3c24xx_dclk->clk_data.clks;
+ int i;
+
+ clk_notifier_unregister(clk_table[DIV_DCLK1],
+ &s3c24xx_dclk->dclk1_div_change_nb);
+ clk_notifier_unregister(clk_table[DIV_DCLK0],
+ &s3c24xx_dclk->dclk0_div_change_nb);
+
+ for (i = 0; i < DCLK_MAX_CLKS; i++)
+ clk_unregister(clk_table[i]);
+
+ return 0;
+}
+
+static struct s3c24xx_dclk_drv_data dclk_variants[] = {
+ [S3C2410] = {
+ .clkout0_parent_names = clkout0_s3c2410_p,
+ .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2410_p),
+ .clkout1_parent_names = clkout1_s3c2410_p,
+ .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2410_p),
+ .mux_parent_names = dclk_s3c2410_p,
+ .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
+ },
+ [S3C2412] = {
+ .clkout0_parent_names = clkout0_s3c2412_p,
+ .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2412_p),
+ .clkout1_parent_names = clkout1_s3c2412_p,
+ .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2412_p),
+ .mux_parent_names = dclk_s3c2410_p,
+ .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
+ },
+ [S3C2440] = {
+ .clkout0_parent_names = clkout0_s3c2440_p,
+ .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2440_p),
+ .clkout1_parent_names = clkout1_s3c2440_p,
+ .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2440_p),
+ .mux_parent_names = dclk_s3c2410_p,
+ .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
+ },
+ [S3C2443] = {
+ .clkout0_parent_names = clkout0_s3c2443_p,
+ .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2443_p),
+ .clkout1_parent_names = clkout1_s3c2443_p,
+ .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2443_p),
+ .mux_parent_names = dclk_s3c2443_p,
+ .mux_num_parents = ARRAY_SIZE(dclk_s3c2443_p),
+ },
+};
+
+static struct platform_device_id s3c24xx_dclk_driver_ids[] = {
+ {
+ .name = "s3c2410-dclk",
+ .driver_data = (kernel_ulong_t)&dclk_variants[S3C2410],
+ }, {
+ .name = "s3c2412-dclk",
+ .driver_data = (kernel_ulong_t)&dclk_variants[S3C2412],
+ }, {
+ .name = "s3c2440-dclk",
+ .driver_data = (kernel_ulong_t)&dclk_variants[S3C2440],
+ }, {
+ .name = "s3c2443-dclk",
+ .driver_data = (kernel_ulong_t)&dclk_variants[S3C2443],
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids);
+
+static struct platform_driver s3c24xx_dclk_driver = {
+ .driver = {
+ .name = "s3c24xx-dclk",
+ .owner = THIS_MODULE,
+ .pm = &s3c24xx_dclk_pm_ops,
+ },
+ .probe = s3c24xx_dclk_probe,
+ .remove = s3c24xx_dclk_remove,
+ .id_table = s3c24xx_dclk_driver_ids,
+};
+module_platform_driver(s3c24xx_dclk_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Driver for the S3C24XX external clock outputs");
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
new file mode 100644
index 000000000000..ba0716801db2
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for S3C2410 and following SoCs.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/s3c2410.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define LOCKTIME 0x00
+#define MPLLCON 0x04
+#define UPLLCON 0x08
+#define CLKCON 0x0c
+#define CLKSLOW 0x10
+#define CLKDIVN 0x14
+#define CAMDIVN 0x18
+
+/* the soc types */
+enum supported_socs {
+ S3C2410,
+ S3C2440,
+ S3C2442,
+};
+
+/* list of PLLs to be registered */
+enum s3c2410_plls {
+ mpll, upll,
+};
+
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *s3c2410_save;
+
+/*
+ * list of controller registers to be saved and restored during a
+ * suspend/resume cycle.
+ */
+static unsigned long s3c2410_clk_regs[] __initdata = {
+ LOCKTIME,
+ MPLLCON,
+ UPLLCON,
+ CLKCON,
+ CLKSLOW,
+ CLKDIVN,
+ CAMDIVN,
+};
+
+static int s3c2410_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, s3c2410_save,
+ ARRAY_SIZE(s3c2410_clk_regs));
+
+ return 0;
+}
+
+static void s3c2410_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, s3c2410_save,
+ ARRAY_SIZE(s3c2410_clk_regs));
+}
+
+static struct syscore_ops s3c2410_clk_syscore_ops = {
+ .suspend = s3c2410_clk_suspend,
+ .resume = s3c2410_clk_resume,
+};
+
+static void __init s3c2410_clk_sleep_init(void)
+{
+ s3c2410_save = samsung_clk_alloc_reg_dump(s3c2410_clk_regs,
+ ARRAY_SIZE(s3c2410_clk_regs));
+ if (!s3c2410_save) {
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
+ }
+
+ register_syscore_ops(&s3c2410_clk_syscore_ops);
+}
+#else
+static void s3c2410_clk_sleep_init(void) {}
+#endif
+
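+/*
+ * Bit 4 of CLKSLOW is the SLOW bit: when set, fclk is fed from the
+ * divided xti input (div_slow) instead of the mpll output.
+ */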
+PNAME(fclk_p) = { "mpll", "div_slow" };
+
+struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
+ MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1),
+};
+
+static struct clk_div_table divslow_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { .val = 4, .div = 8 },
+ { .val = 5, .div = 10 },
+ { .val = 6, .div = 12 },
+ { .val = 7, .div = 14 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
+ DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d),
+ DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1),
+};
+
+struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
+ GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0),
+ GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0),
+ GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0),
+ GATE(PCLK_ADC, "adc", "pclk", CLKCON, 15, 0, 0),
+ GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 14, 0, 0),
+ GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 12, 0, 0),
+ GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 11, 0, 0),
+ GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 10, 0, 0),
+ GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 9, 0, 0),
+ GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 8, 0, 0),
+ GATE(HCLK_USBD, "usb-device", "hclk", CLKCON, 7, 0, 0),
+ GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
+ GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
+ GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
+};
+
+/* should be added _after_ the soc-specific clocks are created */
+struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
+ ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
+ ALIAS(PCLK_ADC, NULL, "adc"),
+ ALIAS(PCLK_RTC, NULL, "rtc"),
+ ALIAS(PCLK_PWM, NULL, "timers"),
+ ALIAS(HCLK_LCD, NULL, "lcd"),
+ ALIAS(HCLK_USBD, NULL, "usb-device"),
+ ALIAS(HCLK_USBH, NULL, "usb-host"),
+ ALIAS(UCLK, NULL, "usb-bus-host"),
+ ALIAS(UCLK, NULL, "usb-bus-gadget"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(UCLK, NULL, "uclk"),
+ ALIAS(HCLK, NULL, "hclk"),
+ ALIAS(MPLL, NULL, "mpll"),
+ ALIAS(FCLK, NULL, "fclk"),
+};
+
+/* S3C2410 specific clocks */
+
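+/*
+ * The s3c2410 PLLs compute fout = (mdiv + 8) * fin / ((pdiv + 2) * 2^sdiv),
+ * e.g. (127 + 8) * 12 MHz / ((1 + 2) * 2) = 270 MHz for the first entry.
+ */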
+static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* 2410A extras */
+ PLL_35XX_RATE(270000000, 127, 1, 1),
+ PLL_35XX_RATE(268000000, 126, 1, 1),
+ PLL_35XX_RATE(266000000, 125, 1, 1),
+ PLL_35XX_RATE(226000000, 105, 1, 1),
+ PLL_35XX_RATE(210000000, 132, 2, 1),
+ /* 2410 common */
+ PLL_35XX_RATE(203000000, 161, 3, 1),
+ PLL_35XX_RATE(192000000, 88, 1, 1),
+ PLL_35XX_RATE(186000000, 85, 1, 1),
+ PLL_35XX_RATE(180000000, 82, 1, 1),
+ PLL_35XX_RATE(170000000, 77, 1, 1),
+ PLL_35XX_RATE(158000000, 71, 1, 1),
+ PLL_35XX_RATE(152000000, 68, 1, 1),
+ PLL_35XX_RATE(147000000, 90, 2, 1),
+ PLL_35XX_RATE(135000000, 82, 2, 1),
+ PLL_35XX_RATE(124000000, 116, 1, 2),
+ PLL_35XX_RATE(118000000, 150, 2, 2),
+ PLL_35XX_RATE(113000000, 105, 1, 2),
+ PLL_35XX_RATE(101000000, 127, 2, 2),
+ PLL_35XX_RATE(90000000, 112, 2, 2),
+ PLL_35XX_RATE(85000000, 105, 2, 2),
+ PLL_35XX_RATE(79000000, 71, 1, 2),
+ PLL_35XX_RATE(68000000, 82, 2, 2),
+ PLL_35XX_RATE(56000000, 142, 2, 3),
+	PLL_35XX_RATE(51000000, 161, 3, 3),
+	PLL_35XX_RATE(48000000, 120, 2, 3),
+ PLL_35XX_RATE(45000000, 82, 1, 3),
+ PLL_35XX_RATE(34000000, 82, 2, 3),
+ { /* sentinel */ },
+};
+
+static struct samsung_pll_clock s3c2410_plls[] __initdata = {
+ [mpll] = PLL(pll_s3c2410_mpll, MPLL, "mpll", "xti",
+ LOCKTIME, MPLLCON, NULL),
+ [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
+ LOCKTIME, UPLLCON, NULL),
+};
+
+struct samsung_div_clock s3c2410_dividers[] __initdata = {
+ DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
+};
+
+struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
+ /*
+	 * armclk is supplied directly by fclk; unlike on the s3c244x
+	 * below, it cannot be switched to another source.
+ */
+ FFACTOR(ARMCLK, "armclk", "fclk", 1, 1, 0),
+
+ /* uclk is fed from the unmodified upll */
+ FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
+};
+
+struct samsung_clock_alias s3c2410_aliases[] __initdata = {
+ ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
+ ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
+ ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
+ ALIAS(PCLK_UART0, "s3c2410-uart.0", "clk_uart_baud0"),
+ ALIAS(PCLK_UART1, "s3c2410-uart.1", "clk_uart_baud0"),
+ ALIAS(PCLK_UART2, "s3c2410-uart.2", "clk_uart_baud0"),
+ ALIAS(UCLK, NULL, "clk_uart_baud1"),
+};
+
+/* S3C244x specific clocks */
+
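+/*
+ * The s3c244x MPLL has an extra factor of two compared to the s3c2410:
+ * fout = 2 * (mdiv + 8) * fin / ((pdiv + 2) * 2^sdiv), e.g.
+ * 2 * (0x5c + 8) * 12 MHz / ((1 + 2) * 2) = 400 MHz for the first entry.
+ */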
+static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ PLL_35XX_RATE(400000000, 0x5c, 1, 1),
+ PLL_35XX_RATE(390000000, 0x7a, 2, 1),
+ PLL_35XX_RATE(380000000, 0x57, 1, 1),
+ PLL_35XX_RATE(370000000, 0xb1, 4, 1),
+ PLL_35XX_RATE(360000000, 0x70, 2, 1),
+ PLL_35XX_RATE(350000000, 0xa7, 4, 1),
+ PLL_35XX_RATE(340000000, 0x4d, 1, 1),
+ PLL_35XX_RATE(330000000, 0x66, 2, 1),
+ PLL_35XX_RATE(320000000, 0x98, 4, 1),
+ PLL_35XX_RATE(310000000, 0x93, 4, 1),
+ PLL_35XX_RATE(300000000, 0x75, 3, 1),
+ PLL_35XX_RATE(240000000, 0x70, 1, 2),
+ PLL_35XX_RATE(230000000, 0x6b, 1, 2),
+ PLL_35XX_RATE(220000000, 0x66, 1, 2),
+ PLL_35XX_RATE(210000000, 0x84, 2, 2),
+ PLL_35XX_RATE(200000000, 0x5c, 1, 2),
+ PLL_35XX_RATE(190000000, 0x57, 1, 2),
+ PLL_35XX_RATE(180000000, 0x70, 2, 2),
+ PLL_35XX_RATE(170000000, 0x4d, 1, 2),
+ PLL_35XX_RATE(160000000, 0x98, 4, 2),
+ PLL_35XX_RATE(150000000, 0x75, 3, 2),
+ PLL_35XX_RATE(120000000, 0x70, 1, 3),
+ PLL_35XX_RATE(110000000, 0x66, 1, 3),
+ PLL_35XX_RATE(100000000, 0x5c, 1, 3),
+ PLL_35XX_RATE(90000000, 0x70, 2, 3),
+ PLL_35XX_RATE(80000000, 0x98, 4, 3),
+ PLL_35XX_RATE(75000000, 0x75, 3, 3),
+ { /* sentinel */ },
+};
+
+static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
+ [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
+ LOCKTIME, MPLLCON, NULL),
+ [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
+ LOCKTIME, UPLLCON, NULL),
+};
+
+PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
+PNAME(armclk_p) = { "fclk", "hclk" };
+
+struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
+ MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
+ MUX(ARMCLK, "armclk", armclk_p, CAMDIVN, 12, 1),
+};
+
+struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
+ FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0),
+ FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_div_table div_hclk_4_d[] = {
+ { .val = 0, .div = 4 },
+ { .val = 1, .div = 8 },
+ { /* sentinel */ },
+};
+
+static struct clk_div_table div_hclk_3_d[] = {
+ { .val = 0, .div = 3 },
+ { .val = 1, .div = 6 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
+ DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1),
+ DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1),
+ DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d),
+ DIV_T(0, "div_hclk_3", "fclk", CAMDIVN, 8, 1, div_hclk_3_d),
+ DIV(0, "div_cam", "upll", CAMDIVN, 0, 3),
+};
+
+struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
+ GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0),
+};
+
+struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
+ ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
+ ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
+ ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
+ ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
+ ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
+ ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
+ ALIAS(HCLK_CAM, NULL, "camif"),
+ ALIAS(CAMIF, NULL, "camif-upll"),
+};
+
+/* S3C2440 specific clocks */
+
+PNAME(s3c2440_camif_p) = { "upll", "ff_cam" };
+
+struct samsung_mux_clock s3c2440_muxes[] __initdata = {
+ MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1),
+};
+
+struct samsung_gate_clock s3c2440_gates[] __initdata = {
+ GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0),
+};
+
+/* S3C2442 specific clocks */
+
+struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
+ FFACTOR(0, "upll_3", "upll", 1, 3, 0),
+};
+
+PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" };
+
+struct samsung_mux_clock s3c2442_muxes[] __initdata = {
+ MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2),
+};
+
+/*
+ * Fixed-rate clocks generated outside the SoC.
+ * Only necessary until the conversion to devicetree is complete.
+ */
+#define XTI 1
+struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
+ FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
+};
+
+static void __init s3c2410_common_clk_register_fixed_ext(
+ struct samsung_clk_provider *ctx,
+ unsigned long xti_f)
+{
+ struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
+
+ s3c2410_common_frate_clks[0].fixed_rate = xti_f;
+ samsung_clk_register_fixed_rate(ctx, s3c2410_common_frate_clks,
+ ARRAY_SIZE(s3c2410_common_frate_clks));
+
+ samsung_clk_register_alias(ctx, &xti_alias, 1);
+}
+
+void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
+ int current_soc,
+ void __iomem *base)
+{
+ struct samsung_clk_provider *ctx;
+ reg_base = base;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ /* Register external clocks only in non-dt cases */
+ if (!np)
+ s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
+
+	if (current_soc == S3C2410) {
+ if (_get_rate("xti") == 12 * MHZ) {
+ s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
+ s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
+ }
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(ctx, s3c2410_plls,
+ ARRAY_SIZE(s3c2410_plls), reg_base);
+
+ } else { /* S3C2440, S3C2442 */
+ if (_get_rate("xti") == 12 * MHZ) {
+ /*
+			 * The two PLLs follow different calculation schemes;
+			 * the UPLL uses the same scheme as the s3c2410 PLLs.
+ */
+ s3c244x_common_plls[mpll].rate_table =
+ pll_s3c244x_12mhz_tbl;
+ s3c244x_common_plls[upll].rate_table =
+ pll_s3c2410_12mhz_tbl;
+ }
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(ctx, s3c244x_common_plls,
+ ARRAY_SIZE(s3c244x_common_plls), reg_base);
+ }
+
+ /* Register common internal clocks. */
+ samsung_clk_register_mux(ctx, s3c2410_common_muxes,
+ ARRAY_SIZE(s3c2410_common_muxes));
+ samsung_clk_register_div(ctx, s3c2410_common_dividers,
+ ARRAY_SIZE(s3c2410_common_dividers));
+ samsung_clk_register_gate(ctx, s3c2410_common_gates,
+ ARRAY_SIZE(s3c2410_common_gates));
+
+ if (current_soc == S3C2440 || current_soc == S3C2442) {
+ samsung_clk_register_div(ctx, s3c244x_common_dividers,
+ ARRAY_SIZE(s3c244x_common_dividers));
+ samsung_clk_register_gate(ctx, s3c244x_common_gates,
+ ARRAY_SIZE(s3c244x_common_gates));
+ samsung_clk_register_mux(ctx, s3c244x_common_muxes,
+ ARRAY_SIZE(s3c244x_common_muxes));
+ samsung_clk_register_fixed_factor(ctx, s3c244x_common_ffactor,
+ ARRAY_SIZE(s3c244x_common_ffactor));
+ }
+
+ /* Register SoC-specific clocks. */
+ switch (current_soc) {
+ case S3C2410:
+ samsung_clk_register_div(ctx, s3c2410_dividers,
+ ARRAY_SIZE(s3c2410_dividers));
+ samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
+ ARRAY_SIZE(s3c2410_ffactor));
+		samsung_clk_register_alias(ctx, s3c2410_aliases,
+				ARRAY_SIZE(s3c2410_aliases));
+ break;
+ case S3C2440:
+ samsung_clk_register_mux(ctx, s3c2440_muxes,
+ ARRAY_SIZE(s3c2440_muxes));
+ samsung_clk_register_gate(ctx, s3c2440_gates,
+ ARRAY_SIZE(s3c2440_gates));
+ break;
+ case S3C2442:
+ samsung_clk_register_mux(ctx, s3c2442_muxes,
+ ARRAY_SIZE(s3c2442_muxes));
+ samsung_clk_register_fixed_factor(ctx, s3c2442_ffactor,
+ ARRAY_SIZE(s3c2442_ffactor));
+ break;
+ }
+
+ /*
+ * Register common aliases at the end, as some of the aliased clocks
+ * are SoC specific.
+ */
+ samsung_clk_register_alias(ctx, s3c2410_common_aliases,
+ ARRAY_SIZE(s3c2410_common_aliases));
+
+ if (current_soc == S3C2440 || current_soc == S3C2442) {
+ samsung_clk_register_alias(ctx, s3c244x_common_aliases,
+ ARRAY_SIZE(s3c244x_common_aliases));
+ }
+
+ s3c2410_clk_sleep_init();
+}
+
+static void __init s3c2410_clk_init(struct device_node *np)
+{
+	s3c2410_common_clk_init(np, 0, S3C2410, NULL);
+}
+CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
+
+static void __init s3c2440_clk_init(struct device_node *np)
+{
+	s3c2410_common_clk_init(np, 0, S3C2440, NULL);
+}
+CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
+
+static void __init s3c2442_clk_init(struct device_node *np)
+{
+	s3c2410_common_clk_init(np, 0, S3C2442, NULL);
+}
+CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
new file mode 100644
index 000000000000..23e4313f625e
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for S3C2412 and S3C2413.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/s3c2412.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define LOCKTIME 0x00
+#define MPLLCON 0x04
+#define UPLLCON 0x08
+#define CLKCON 0x0c
+#define CLKDIVN 0x14
+#define CLKSRC 0x1c
+
+/* list of PLLs to be registered */
+enum s3c2412_plls {
+ mpll, upll,
+};
+
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *s3c2412_save;
+
+/*
+ * list of controller registers to be saved and restored during a
+ * suspend/resume cycle.
+ */
+static unsigned long s3c2412_clk_regs[] __initdata = {
+ LOCKTIME,
+ MPLLCON,
+ UPLLCON,
+ CLKCON,
+ CLKDIVN,
+ CLKSRC,
+};
+
+static int s3c2412_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, s3c2412_save,
+ ARRAY_SIZE(s3c2412_clk_regs));
+
+ return 0;
+}
+
+static void s3c2412_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, s3c2412_save,
+ ARRAY_SIZE(s3c2412_clk_regs));
+}
+
+static struct syscore_ops s3c2412_clk_syscore_ops = {
+ .suspend = s3c2412_clk_suspend,
+ .resume = s3c2412_clk_resume,
+};
+
+static void __init s3c2412_clk_sleep_init(void)
+{
+ s3c2412_save = samsung_clk_alloc_reg_dump(s3c2412_clk_regs,
+ ARRAY_SIZE(s3c2412_clk_regs));
+ if (!s3c2412_save) {
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
+ }
+
+ register_syscore_ops(&s3c2412_clk_syscore_ops);
+}
+#else
+static void s3c2412_clk_sleep_init(void) {}
+#endif
+
+static struct clk_div_table divxti_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { .val = 4, .div = 8 },
+ { .val = 5, .div = 10 },
+ { .val = 6, .div = 12 },
+ { .val = 7, .div = 14 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c2412_dividers[] __initdata = {
+ DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d),
+ DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4),
+ DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4),
+ DIV(0, "div_uart", "mux_uart", CLKDIVN, 8, 4),
+ DIV(0, "div_usb", "mux_usb", CLKDIVN, 6, 1),
+ DIV(0, "div_hclk_half", "hclk", CLKDIVN, 5, 1),
+ DIV(ARMDIV, "armdiv", "msysclk", CLKDIVN, 3, 1),
+ DIV(PCLK, "pclk", "hclk", CLKDIVN, 2, 1),
+ DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2),
+};
+
+struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
+ FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT),
+};
+
+/*
+ * The first two parent slots depend on the OM[4] pin setting, which is
+ * not readable from software, so assume it is set to xti.
+ */
+PNAME(erefclk_p) = { "xti", "xti", "xti", "ext" };
+PNAME(urefclk_p) = { "xti", "xti", "xti", "ext" };
+
+PNAME(camclk_p) = { "usysclk", "hclk" };
+PNAME(usbclk_p) = { "usysclk", "hclk" };
+PNAME(i2sclk_p) = { "erefclk", "mpll" };
+PNAME(uartclk_p) = { "erefclk", "mpll" };
+PNAME(usysclk_p) = { "urefclk", "upll" };
+PNAME(msysclk_p) = { "mdivclk", "mpll" };
+PNAME(mdivclk_p) = { "xti", "div_xti" };
+PNAME(armclk_p) = { "armdiv", "hclk" };
+
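+/*
+ * armdiv taps msysclk (mdivclk or mpll) and hclk is derived from armdiv
+ * in turn, so the CPU and bus clocks always scale together here.
+ */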
+struct samsung_mux_clock s3c2412_muxes[] __initdata = {
+ MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2),
+ MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2),
+ MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1),
+ MUX(0, "mux_usb", usbclk_p, CLKSRC, 10, 1),
+ MUX(0, "mux_i2s", i2sclk_p, CLKSRC, 9, 1),
+ MUX(0, "mux_uart", uartclk_p, CLKSRC, 8, 1),
+ MUX(USYSCLK, "usysclk", usysclk_p, CLKSRC, 5, 1),
+ MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
+ MUX(MDIVCLK, "mdivclk", mdivclk_p, CLKSRC, 3, 1),
+ MUX(ARMCLK, "armclk", armclk_p, CLKDIVN, 4, 1),
+};
+
+static struct samsung_pll_clock s3c2412_plls[] __initdata = {
+ [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
+ LOCKTIME, MPLLCON, NULL),
+ [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk",
+ LOCKTIME, UPLLCON, NULL),
+};
+
+struct samsung_gate_clock s3c2412_gates[] __initdata = {
+ GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0),
+ GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0),
+ GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0),
+ GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 25, 0, 0),
+ GATE(PCLK_ADC, "adc", "pclk", CLKCON, 24, 0, 0),
+ GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 23, 0, 0),
+ GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 22, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 21, 0, 0),
+ GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 20, 0, 0),
+ GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 19, 0, 0),
+ GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 18, 0, 0),
+ GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 17, 0, 0),
+ GATE(PCLK_USBD, "usb-device", "pclk", CLKCON, 16, 0, 0),
+ GATE(SCLK_CAM, "sclk_cam", "div_cam", CLKCON, 15, 0, 0),
+ GATE(SCLK_UART, "sclk_uart", "div_uart", CLKCON, 14, 0, 0),
+ GATE(SCLK_I2S, "sclk_i2s", "div_i2s", CLKCON, 13, 0, 0),
+ GATE(SCLK_USBH, "sclk_usbh", "div_usb", CLKCON, 12, 0, 0),
+ GATE(SCLK_USBD, "sclk_usbd", "div_usb", CLKCON, 11, 0, 0),
+ GATE(HCLK_HALF, "hclk_half", "div_hclk_half", CLKCON, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_X2, "hclkx2", "ff_hclk", CLKCON, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_SDRAM, "sdram", "hclk", CLKCON, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
+ GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
+ GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
+ GATE(HCLK_DMA3, "dma3", "hclk", CLKCON, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA2, "dma2", "hclk", CLKCON, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA1, "dma1", "hclk", CLKCON, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+struct samsung_clock_alias s3c2412_aliases[] __initdata = {
+ ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"),
+ ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"),
+ ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"),
+ ALIAS(PCLK_UART0, "s3c2412-uart.0", "clk_uart_baud2"),
+ ALIAS(PCLK_UART1, "s3c2412-uart.1", "clk_uart_baud2"),
+ ALIAS(PCLK_UART2, "s3c2412-uart.2", "clk_uart_baud2"),
+ ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
+ ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
+ ALIAS(PCLK_ADC, NULL, "adc"),
+ ALIAS(PCLK_RTC, NULL, "rtc"),
+ ALIAS(PCLK_PWM, NULL, "timers"),
+ ALIAS(HCLK_LCD, NULL, "lcd"),
+ ALIAS(PCLK_USBD, NULL, "usb-device"),
+ ALIAS(SCLK_USBD, NULL, "usb-bus-gadget"),
+ ALIAS(HCLK_USBH, NULL, "usb-host"),
+ ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(HCLK, NULL, "hclk"),
+ ALIAS(MPLL, NULL, "mpll"),
+ ALIAS(MSYSCLK, NULL, "fclk"),
+};
+
+/*
+ * Fixed-rate clocks generated outside the SoC.
+ * Only necessary until the conversion to devicetree is complete.
+ */
+#define XTI 1
+struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
+ FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
+};
+
+static void __init s3c2412_common_clk_register_fixed_ext(
+ struct samsung_clk_provider *ctx,
+ unsigned long xti_f, unsigned long ext_f)
+{
+ /* xtal alias is necessary for the current cpufreq driver */
+ struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
+
+ s3c2412_common_frate_clks[0].fixed_rate = xti_f;
+ s3c2412_common_frate_clks[1].fixed_rate = ext_f;
+ samsung_clk_register_fixed_rate(ctx, s3c2412_common_frate_clks,
+ ARRAY_SIZE(s3c2412_common_frate_clks));
+
+ samsung_clk_register_alias(ctx, &xti_alias, 1);
+}
+
+void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
+ unsigned long ext_f, void __iomem *base)
+{
+ struct samsung_clk_provider *ctx;
+ reg_base = base;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ /* Register external clocks only in non-dt cases */
+ if (!np)
+ s3c2412_common_clk_register_fixed_ext(ctx, xti_f, ext_f);
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(ctx, s3c2412_plls, ARRAY_SIZE(s3c2412_plls),
+ reg_base);
+
+ /* Register common internal clocks. */
+ samsung_clk_register_mux(ctx, s3c2412_muxes, ARRAY_SIZE(s3c2412_muxes));
+ samsung_clk_register_div(ctx, s3c2412_dividers,
+ ARRAY_SIZE(s3c2412_dividers));
+ samsung_clk_register_gate(ctx, s3c2412_gates,
+ ARRAY_SIZE(s3c2412_gates));
+ samsung_clk_register_fixed_factor(ctx, s3c2412_ffactor,
+ ARRAY_SIZE(s3c2412_ffactor));
+ samsung_clk_register_alias(ctx, s3c2412_aliases,
+ ARRAY_SIZE(s3c2412_aliases));
+
+ s3c2412_clk_sleep_init();
+}
+
+static void __init s3c2412_clk_init(struct device_node *np)
+{
+	s3c2412_common_clk_init(np, 0, 0, NULL);
+}
+CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
new file mode 100644
index 000000000000..c4bbdabebaa4
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for S3C2443 and following SoCs.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/s3c2443.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/* S3C2416 clock controller register offsets */
+#define LOCKCON0 0x00
+#define LOCKCON1 0x04
+#define MPLLCON 0x10
+#define EPLLCON 0x18
+#define EPLLCON_K 0x1C
+#define CLKSRC 0x20
+#define CLKDIV0 0x24
+#define CLKDIV1 0x28
+#define CLKDIV2 0x2C
+#define HCLKCON 0x30
+#define PCLKCON 0x34
+#define SCLKCON 0x38
+
+/* the soc types */
+enum supported_socs {
+ S3C2416,
+ S3C2443,
+ S3C2450,
+};
+
+/* list of PLLs to be registered */
+enum s3c2443_plls {
+ mpll, epll,
+};
+
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *s3c2443_save;
+
+/*
+ * list of controller registers to be saved and restored during a
+ * suspend/resume cycle.
+ */
+static unsigned long s3c2443_clk_regs[] __initdata = {
+ LOCKCON0,
+ LOCKCON1,
+ MPLLCON,
+ EPLLCON,
+ EPLLCON_K,
+ CLKSRC,
+ CLKDIV0,
+ CLKDIV1,
+ CLKDIV2,
+ PCLKCON,
+ HCLKCON,
+ SCLKCON,
+};
+
+static int s3c2443_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, s3c2443_save,
+ ARRAY_SIZE(s3c2443_clk_regs));
+
+ return 0;
+}
+
+static void s3c2443_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, s3c2443_save,
+ ARRAY_SIZE(s3c2443_clk_regs));
+}
+
+static struct syscore_ops s3c2443_clk_syscore_ops = {
+ .suspend = s3c2443_clk_suspend,
+ .resume = s3c2443_clk_resume,
+};
+
+static void __init s3c2443_clk_sleep_init(void)
+{
+ s3c2443_save = samsung_clk_alloc_reg_dump(s3c2443_clk_regs,
+ ARRAY_SIZE(s3c2443_clk_regs));
+ if (!s3c2443_save) {
+ pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
+ __func__);
+ return;
+ }
+
+ register_syscore_ops(&s3c2443_clk_syscore_ops);
+}
+#else
+static void s3c2443_clk_sleep_init(void) {}
+#endif
+
+PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" };
+PNAME(esysclk_p) = { "epllref", "epll" };
+PNAME(mpllref_p) = { "xti", "mdivclk" };
+PNAME(msysclk_p) = { "mpllref", "mpll" };
+PNAME(armclk_p) = { "armdiv", "hclk" };
+PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" };
+
+struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
+ MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
+ MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
+ MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
+ MUX_A(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1, "msysclk"),
+ MUX_A(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1, "armclk"),
+ MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
+};
+
+static struct clk_div_table hclk_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { /* sentinel */ },
+};
+
+static struct clk_div_table mdivclk_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 3 },
+ { .val = 2, .div = 5 },
+ { .val = 3, .div = 7 },
+ { .val = 4, .div = 9 },
+ { .val = 5, .div = 11 },
+ { .val = 6, .div = 13 },
+ { .val = 7, .div = 15 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
+ DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d),
+ DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2),
+ DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d),
+ DIV(PCLK, "pclk", "hclk", CLKDIV0, 2, 1),
+ DIV(0, "div_hsspi0_epll", "esysclk", CLKDIV1, 24, 2),
+ DIV(0, "div_fimd", "esysclk", CLKDIV1, 16, 8),
+ DIV(0, "div_i2s0", "esysclk", CLKDIV1, 12, 4),
+ DIV(0, "div_uart", "esysclk", CLKDIV1, 8, 4),
+ DIV(0, "div_hsmmc1", "esysclk", CLKDIV1, 6, 2),
+ DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2),
+};
+
+struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
+ GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0),
+ GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0),
+ GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0),
+ GATE(SCLK_I2S0, "sclk_i2s0", "mux_i2s0", SCLKCON, 9, 0, 0),
+ GATE(SCLK_UART, "sclk_uart", "div_uart", SCLKCON, 8, 0, 0),
+ GATE(SCLK_USBH, "sclk_usbhost", "div_usbhost", SCLKCON, 1, 0, 0),
+ GATE(HCLK_DRAM, "dram", "hclk", HCLKCON, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_SSMC, "ssmc", "hclk", HCLKCON, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_HSMMC1, "hsmmc1", "hclk", HCLKCON, 16, 0, 0),
+ GATE(HCLK_USBD, "usb-device", "hclk", HCLKCON, 12, 0, 0),
+ GATE(HCLK_USBH, "usb-host", "hclk", HCLKCON, 11, 0, 0),
+ GATE(HCLK_LCD, "lcd", "hclk", HCLKCON, 9, 0, 0),
+ GATE(HCLK_DMA5, "dma5", "hclk", HCLKCON, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA4, "dma4", "hclk", HCLKCON, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA3, "dma3", "hclk", HCLKCON, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA2, "dma2", "hclk", HCLKCON, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA1, "dma1", "hclk", HCLKCON, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA0, "dma0", "hclk", HCLKCON, 0, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_GPIO, "gpio", "pclk", PCLKCON, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_RTC, "rtc", "pclk", PCLKCON, 12, 0, 0),
+ GATE(PCLK_WDT, "wdt", "pclk", PCLKCON, 11, 0, 0),
+ GATE(PCLK_PWM, "pwm", "pclk", PCLKCON, 10, 0, 0),
+ GATE(PCLK_I2S0, "i2s0", "pclk", PCLKCON, 9, 0, 0),
+ GATE(PCLK_AC97, "ac97", "pclk", PCLKCON, 8, 0, 0),
+ GATE(PCLK_ADC, "adc", "pclk", PCLKCON, 7, 0, 0),
+ GATE(PCLK_SPI0, "spi0", "pclk", PCLKCON, 6, 0, 0),
+ GATE(PCLK_I2C0, "i2c0", "pclk", PCLKCON, 4, 0, 0),
+ GATE(PCLK_UART3, "uart3", "pclk", PCLKCON, 3, 0, 0),
+ GATE(PCLK_UART2, "uart2", "pclk", PCLKCON, 2, 0, 0),
+ GATE(PCLK_UART1, "uart1", "pclk", PCLKCON, 1, 0, 0),
+ GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0),
+};
+
+struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+ ALIAS(HCLK, NULL, "hclk"),
+ ALIAS(HCLK_SSMC, NULL, "nand"),
+ ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
+ ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
+ ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
+ ALIAS(PCLK_UART3, "s3c2440-uart.3", "uart"),
+ ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
+ ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
+ ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
+ ALIAS(PCLK_UART3, "s3c2440-uart.3", "clk_uart_baud2"),
+ ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
+ ALIAS(PCLK_PWM, NULL, "timers"),
+ ALIAS(PCLK_RTC, NULL, "rtc"),
+ ALIAS(PCLK_WDT, NULL, "watchdog"),
+ ALIAS(PCLK_ADC, NULL, "adc"),
+ ALIAS(PCLK_I2C0, "s3c2410-i2c.0", "i2c"),
+ ALIAS(HCLK_USBD, NULL, "usb-device"),
+ ALIAS(HCLK_USBH, NULL, "usb-host"),
+ ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
+ ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi"),
+ ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi_busclk0"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
+ ALIAS(PCLK_I2S0, "samsung-i2s.0", "iis"),
+ ALIAS(SCLK_I2S0, NULL, "i2s-if"),
+ ALIAS(HCLK_LCD, NULL, "lcd"),
+ ALIAS(SCLK_FIMD, NULL, "sclk_fimd"),
+};
+
+/* S3C2416 specific clocks */
+
+static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
+ [mpll] = PLL(pll_6552_s3c2416, 0, "mpll", "mpllref",
+ LOCKCON0, MPLLCON, NULL),
+ [epll] = PLL(pll_6553, 0, "epll", "epllref",
+ LOCKCON1, EPLLCON, NULL),
+};
+
+PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" };
+PNAME(s3c2416_hsmmc1_p) = { "sclk_hsmmc1", "sclk_hsmmcext" };
+PNAME(s3c2416_hsspi0_p) = { "hsspi0_epll", "hsspi0_mpll" };
+
+static struct clk_div_table armdiv_s3c2416_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 5, .div = 6 },
+ { .val = 7, .div = 8 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c2416_dividers[] __initdata = {
+ DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d),
+ DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4),
+ DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2),
+};
+
+struct samsung_mux_clock s3c2416_muxes[] __initdata = {
+ MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1),
+ MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1),
+ MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1),
+};
+
+struct samsung_gate_clock s3c2416_gates[] __initdata = {
+ GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0),
+ GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
+ GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0),
+ GATE(HCLK_2D, "2d", "hclk", HCLKCON, 20, 0, 0),
+ GATE(HCLK_HSMMC0, "hsmmc0", "hclk", HCLKCON, 15, 0, 0),
+ GATE(HCLK_IROM, "irom", "hclk", HCLKCON, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0),
+};
+
+struct samsung_clock_alias s3c2416_aliases[] __initdata = {
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
+ ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"),
+ ALIAS(MUX_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
+ ALIAS(MUX_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
+ ALIAS(ARMDIV, NULL, "armdiv"),
+};
+
+/* S3C2443 specific clocks */
+
+static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
+ [mpll] = PLL(pll_3000, 0, "mpll", "mpllref",
+ LOCKCON0, MPLLCON, NULL),
+ [epll] = PLL(pll_2126, 0, "epll", "epllref",
+ LOCKCON1, EPLLCON, NULL),
+};
+
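+/*
+ * Sparse armdiv encoding: with bit 3 set the divider is 2 * (low bits + 1);
+ * without it only /1 (val 0) and /3 (val 2) are valid settings.
+ */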
+static struct clk_div_table armdiv_s3c2443_d[] = {
+ { .val = 0, .div = 1 },
+ { .val = 8, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 9, .div = 4 },
+ { .val = 10, .div = 6 },
+ { .val = 11, .div = 8 },
+ { .val = 13, .div = 12 },
+ { .val = 15, .div = 16 },
+ { /* sentinel */ },
+};
+
+struct samsung_div_clock s3c2443_dividers[] __initdata = {
+ DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d),
+ DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
+};
+
+struct samsung_gate_clock s3c2443_gates[] __initdata = {
+ GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
+ GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0),
+ GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
+ GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 15, 0, 0),
+ GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0),
+};
+
+struct samsung_clock_alias s3c2443_aliases[] __initdata = {
+ ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
+ ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
+ ALIAS(SCLK_CAM, NULL, "camif-upll"),
+ ALIAS(PCLK_SPI1, "s3c2410-spi.0", "spi"),
+ ALIAS(PCLK_SDI, NULL, "sdi"),
+ ALIAS(HCLK_CFC, NULL, "cfc"),
+ ALIAS(ARMDIV, NULL, "armdiv"),
+};
+
+/* S3C2450 specific clocks */
+
+PNAME(s3c2450_cam_p) = { "div_cam", "hclk" };
+PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" };
+PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" };
+
+struct samsung_div_clock s3c2450_dividers[] __initdata = {
+ DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
+ DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2),
+ DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4),
+ DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4),
+};
+
+struct samsung_mux_clock s3c2450_muxes[] __initdata = {
+ MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1),
+ MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1),
+ MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2),
+};
+
+struct samsung_gate_clock s3c2450_gates[] __initdata = {
+ GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0),
+ GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0),
+ GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
+ GATE(HCLK_DMA7, "dma7", "hclk", HCLKCON, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(HCLK_DMA6, "dma6", "hclk", HCLKCON, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(PCLK_I2S1, "i2s1", "pclk", PCLKCON, 17, 0, 0),
+ GATE(PCLK_I2C1, "i2c1", "pclk", PCLKCON, 16, 0, 0),
+ GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0),
+};
+
+struct samsung_clock_alias s3c2450_aliases[] __initdata = {
+ ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"),
+ ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"),
+ ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"),
+ ALIAS(PCLK_I2C1, "s3c2410-i2c.1", "i2c"),
+};
+
+/*
+ * Fixed-rate clocks generated outside the SoC.
+ * Only necessary until the conversion to devicetree is complete.
+ */
+struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
+ FRATE(0, "xti", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "ext_i2s", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "ext_uart", NULL, CLK_IS_ROOT, 0),
+};
+
+static void __init s3c2443_common_clk_register_fixed_ext(
+ struct samsung_clk_provider *ctx, unsigned long xti_f)
+{
+ s3c2443_common_frate_clks[0].fixed_rate = xti_f;
+ samsung_clk_register_fixed_rate(ctx, s3c2443_common_frate_clks,
+ ARRAY_SIZE(s3c2443_common_frate_clks));
+}
+
+void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
+ int current_soc,
+ void __iomem *base)
+{
+ struct samsung_clk_provider *ctx;
+ reg_base = base;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ /* Register external clocks only in non-dt cases */
+ if (!np)
+ s3c2443_common_clk_register_fixed_ext(ctx, xti_f);
+
+ /* Register PLLs. */
+ if (current_soc == S3C2416 || current_soc == S3C2450)
+ samsung_clk_register_pll(ctx, s3c2416_pll_clks,
+ ARRAY_SIZE(s3c2416_pll_clks), reg_base);
+ else
+ samsung_clk_register_pll(ctx, s3c2443_pll_clks,
+ ARRAY_SIZE(s3c2443_pll_clks), reg_base);
+
+ /* Register common internal clocks. */
+ samsung_clk_register_mux(ctx, s3c2443_common_muxes,
+ ARRAY_SIZE(s3c2443_common_muxes));
+ samsung_clk_register_div(ctx, s3c2443_common_dividers,
+ ARRAY_SIZE(s3c2443_common_dividers));
+ samsung_clk_register_gate(ctx, s3c2443_common_gates,
+ ARRAY_SIZE(s3c2443_common_gates));
+ samsung_clk_register_alias(ctx, s3c2443_common_aliases,
+ ARRAY_SIZE(s3c2443_common_aliases));
+
+ /* Register SoC-specific clocks. */
+ switch (current_soc) {
+ case S3C2450:
+ samsung_clk_register_div(ctx, s3c2450_dividers,
+ ARRAY_SIZE(s3c2450_dividers));
+ samsung_clk_register_mux(ctx, s3c2450_muxes,
+ ARRAY_SIZE(s3c2450_muxes));
+ samsung_clk_register_gate(ctx, s3c2450_gates,
+ ARRAY_SIZE(s3c2450_gates));
+ samsung_clk_register_alias(ctx, s3c2450_aliases,
+ ARRAY_SIZE(s3c2450_aliases));
+ /* fall through, as s3c2450 extends the s3c2416 clocks */
+ case S3C2416:
+ samsung_clk_register_div(ctx, s3c2416_dividers,
+ ARRAY_SIZE(s3c2416_dividers));
+ samsung_clk_register_mux(ctx, s3c2416_muxes,
+ ARRAY_SIZE(s3c2416_muxes));
+ samsung_clk_register_gate(ctx, s3c2416_gates,
+ ARRAY_SIZE(s3c2416_gates));
+ samsung_clk_register_alias(ctx, s3c2416_aliases,
+ ARRAY_SIZE(s3c2416_aliases));
+ break;
+ case S3C2443:
+ samsung_clk_register_div(ctx, s3c2443_dividers,
+ ARRAY_SIZE(s3c2443_dividers));
+ samsung_clk_register_gate(ctx, s3c2443_gates,
+ ARRAY_SIZE(s3c2443_gates));
+ samsung_clk_register_alias(ctx, s3c2443_aliases,
+ ARRAY_SIZE(s3c2443_aliases));
+ break;
+ }
+
+ s3c2443_clk_sleep_init();
+}
+
+static void __init s3c2416_clk_init(struct device_node *np)
+{
+	s3c2443_common_clk_init(np, 0, S3C2416, NULL);
+}
+CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init);
+
+static void __init s3c2443_clk_init(struct device_node *np)
+{
+	s3c2443_common_clk_init(np, 0, S3C2443, NULL);
+}
+CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init);
+
+static void __init s3c2450_clk_init(struct device_node *np)
+{
+	s3c2443_common_clk_init(np, 0, S3C2450, NULL);
+}
+CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 8bda658137a8..efa16ee592c8 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -442,12 +442,14 @@ static struct samsung_clock_alias s3c6410_clock_aliases[] = {
ALIAS(MEM0_SROM, NULL, "srom"),
};
-static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f,
- unsigned long xusbxti_f)
+static void __init s3c64xx_clk_register_fixed_ext(
+ struct samsung_clk_provider *ctx,
+ unsigned long fin_pll_f,
+ unsigned long xusbxti_f)
{
s3c64xx_fixed_rate_ext_clks[0].fixed_rate = fin_pll_f;
s3c64xx_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
- samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_ext_clks,
+ samsung_clk_register_fixed_rate(ctx, s3c64xx_fixed_rate_ext_clks,
ARRAY_SIZE(s3c64xx_fixed_rate_ext_clks));
}
@@ -456,6 +458,8 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
unsigned long xusbxti_f, bool s3c6400,
void __iomem *base)
{
+ struct samsung_clk_provider *ctx;
+
reg_base = base;
is_s3c6400 = s3c6400;
@@ -465,48 +469,50 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
panic("%s: failed to map registers\n", __func__);
}
- samsung_clk_init(np, reg_base, NR_CLKS);
+ ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
/* Register external clocks. */
if (!np)
- s3c64xx_clk_register_fixed_ext(xtal_f, xusbxti_f);
+ s3c64xx_clk_register_fixed_ext(ctx, xtal_f, xusbxti_f);
/* Register PLLs. */
- samsung_clk_register_pll(s3c64xx_pll_clks,
+ samsung_clk_register_pll(ctx, s3c64xx_pll_clks,
ARRAY_SIZE(s3c64xx_pll_clks), reg_base);
/* Register common internal clocks. */
- samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_clks,
+ samsung_clk_register_fixed_rate(ctx, s3c64xx_fixed_rate_clks,
ARRAY_SIZE(s3c64xx_fixed_rate_clks));
- samsung_clk_register_mux(s3c64xx_mux_clks,
+ samsung_clk_register_mux(ctx, s3c64xx_mux_clks,
ARRAY_SIZE(s3c64xx_mux_clks));
- samsung_clk_register_div(s3c64xx_div_clks,
+ samsung_clk_register_div(ctx, s3c64xx_div_clks,
ARRAY_SIZE(s3c64xx_div_clks));
- samsung_clk_register_gate(s3c64xx_gate_clks,
+ samsung_clk_register_gate(ctx, s3c64xx_gate_clks,
ARRAY_SIZE(s3c64xx_gate_clks));
/* Register SoC-specific clocks. */
if (is_s3c6400) {
- samsung_clk_register_mux(s3c6400_mux_clks,
+ samsung_clk_register_mux(ctx, s3c6400_mux_clks,
ARRAY_SIZE(s3c6400_mux_clks));
- samsung_clk_register_div(s3c6400_div_clks,
+ samsung_clk_register_div(ctx, s3c6400_div_clks,
ARRAY_SIZE(s3c6400_div_clks));
- samsung_clk_register_gate(s3c6400_gate_clks,
+ samsung_clk_register_gate(ctx, s3c6400_gate_clks,
ARRAY_SIZE(s3c6400_gate_clks));
- samsung_clk_register_alias(s3c6400_clock_aliases,
+ samsung_clk_register_alias(ctx, s3c6400_clock_aliases,
ARRAY_SIZE(s3c6400_clock_aliases));
} else {
- samsung_clk_register_mux(s3c6410_mux_clks,
+ samsung_clk_register_mux(ctx, s3c6410_mux_clks,
ARRAY_SIZE(s3c6410_mux_clks));
- samsung_clk_register_div(s3c6410_div_clks,
+ samsung_clk_register_div(ctx, s3c6410_div_clks,
ARRAY_SIZE(s3c6410_div_clks));
- samsung_clk_register_gate(s3c6410_gate_clks,
+ samsung_clk_register_gate(ctx, s3c6410_gate_clks,
ARRAY_SIZE(s3c6410_gate_clks));
- samsung_clk_register_alias(s3c6410_clock_aliases,
+ samsung_clk_register_alias(ctx, s3c6410_clock_aliases,
ARRAY_SIZE(s3c6410_clock_aliases));
}
- samsung_clk_register_alias(s3c64xx_clock_aliases,
+ samsung_clk_register_alias(ctx, s3c64xx_clock_aliases,
ARRAY_SIZE(s3c64xx_clock_aliases));
s3c64xx_clk_sleep_init();
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 91bec3ebdc8f..49629c71c9e7 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -14,13 +14,6 @@
#include <linux/syscore_ops.h>
#include "clk.h"
-static DEFINE_SPINLOCK(lock);
-static struct clk **clk_table;
-static void __iomem *reg_base;
-#ifdef CONFIG_OF
-static struct clk_onecell_data clk_data;
-#endif
-
void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
unsigned int num_regs)
@@ -55,40 +48,58 @@ struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
}
/* setup the essentials required to support clock lookup using ccf */
-void __init samsung_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks)
+struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks)
{
- reg_base = base;
+ struct samsung_clk_provider *ctx;
+ struct clk **clk_table;
+ int ret;
+ int i;
- clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
+ ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL);
+ if (!ctx)
+ panic("could not allocate clock provider context.\n");
+
+ clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
if (!clk_table)
panic("could not allocate clock lookup table\n");
+ for (i = 0; i < nr_clks; ++i)
+ clk_table[i] = ERR_PTR(-ENOENT);
+
+ ctx->reg_base = base;
+ ctx->clk_data.clks = clk_table;
+ ctx->clk_data.clk_num = nr_clks;
+ spin_lock_init(&ctx->lock);
+
if (!np)
- return;
+ return ctx;
-#ifdef CONFIG_OF
- clk_data.clks = clk_table;
- clk_data.clk_num = nr_clks;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-#endif
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &ctx->clk_data);
+ if (ret)
+ panic("could not register clock provide\n");
+
+ return ctx;
}
/* add a clock instance to the clock lookup table used for dt based lookup */
-void samsung_clk_add_lookup(struct clk *clk, unsigned int id)
+void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk,
+ unsigned int id)
{
- if (clk_table && id)
- clk_table[id] = clk;
+ if (ctx->clk_data.clks && id)
+ ctx->clk_data.clks[id] = clk;
}
/* register a list of aliases */
-void __init samsung_clk_register_alias(struct samsung_clock_alias *list,
- unsigned int nr_clk)
+void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
+ struct samsung_clock_alias *list,
+ unsigned int nr_clk)
{
struct clk *clk;
unsigned int idx, ret;
- if (!clk_table) {
+ if (!ctx->clk_data.clks) {
pr_err("%s: clock table missing\n", __func__);
return;
}
@@ -100,7 +111,7 @@ void __init samsung_clk_register_alias(struct samsung_clock_alias *list,
continue;
}
- clk = clk_table[list->id];
+ clk = ctx->clk_data.clks[list->id];
if (!clk) {
pr_err("%s: failed to find clock %d\n", __func__,
list->id);
@@ -115,7 +126,7 @@ void __init samsung_clk_register_alias(struct samsung_clock_alias *list,
}
/* register a list of fixed clocks */
-void __init samsung_clk_register_fixed_rate(
+void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
struct samsung_fixed_rate_clock *list, unsigned int nr_clk)
{
struct clk *clk;
@@ -130,7 +141,7 @@ void __init samsung_clk_register_fixed_rate(
continue;
}
- samsung_clk_add_lookup(clk, list->id);
+ samsung_clk_add_lookup(ctx, clk, list->id);
/*
* Unconditionally add a clock lookup for the fixed rate clocks.
@@ -144,7 +155,7 @@ void __init samsung_clk_register_fixed_rate(
}
/* register a list of fixed factor clocks */
-void __init samsung_clk_register_fixed_factor(
+void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
{
struct clk *clk;
@@ -159,28 +170,30 @@ void __init samsung_clk_register_fixed_factor(
continue;
}
- samsung_clk_add_lookup(clk, list->id);
+ samsung_clk_add_lookup(ctx, clk, list->id);
}
}
/* register a list of mux clocks */
-void __init samsung_clk_register_mux(struct samsung_mux_clock *list,
- unsigned int nr_clk)
+void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
+ struct samsung_mux_clock *list,
+ unsigned int nr_clk)
{
struct clk *clk;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
clk = clk_register_mux(NULL, list->name, list->parent_names,
- list->num_parents, list->flags, reg_base + list->offset,
- list->shift, list->width, list->mux_flags, &lock);
+ list->num_parents, list->flags,
+ ctx->reg_base + list->offset,
+ list->shift, list->width, list->mux_flags, &ctx->lock);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(clk, list->id);
+ samsung_clk_add_lookup(ctx, clk, list->id);
/* register a clock lookup only if a clock alias is specified */
if (list->alias) {
@@ -194,8 +207,9 @@ void __init samsung_clk_register_mux(struct samsung_mux_clock *list,
}
/* register a list of div clocks */
-void __init samsung_clk_register_div(struct samsung_div_clock *list,
- unsigned int nr_clk)
+void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
+ struct samsung_div_clock *list,
+ unsigned int nr_clk)
{
struct clk *clk;
unsigned int idx, ret;
@@ -203,22 +217,22 @@ void __init samsung_clk_register_div(struct samsung_div_clock *list,
for (idx = 0; idx < nr_clk; idx++, list++) {
if (list->table)
clk = clk_register_divider_table(NULL, list->name,
- list->parent_name, list->flags,
- reg_base + list->offset, list->shift,
- list->width, list->div_flags,
- list->table, &lock);
+ list->parent_name, list->flags,
+ ctx->reg_base + list->offset,
+ list->shift, list->width, list->div_flags,
+ list->table, &ctx->lock);
else
clk = clk_register_divider(NULL, list->name,
- list->parent_name, list->flags,
- reg_base + list->offset, list->shift,
- list->width, list->div_flags, &lock);
+ list->parent_name, list->flags,
+ ctx->reg_base + list->offset, list->shift,
+ list->width, list->div_flags, &ctx->lock);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(clk, list->id);
+ samsung_clk_add_lookup(ctx, clk, list->id);
/* register a clock lookup only if a clock alias is specified */
if (list->alias) {
@@ -232,16 +246,17 @@ void __init samsung_clk_register_div(struct samsung_div_clock *list,
}
/* register a list of gate clocks */
-void __init samsung_clk_register_gate(struct samsung_gate_clock *list,
- unsigned int nr_clk)
+void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
+ struct samsung_gate_clock *list,
+ unsigned int nr_clk)
{
struct clk *clk;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
clk = clk_register_gate(NULL, list->name, list->parent_name,
- list->flags, reg_base + list->offset,
- list->bit_idx, list->gate_flags, &lock);
+ list->flags, ctx->reg_base + list->offset,
+ list->bit_idx, list->gate_flags, &ctx->lock);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
@@ -257,7 +272,7 @@ void __init samsung_clk_register_gate(struct samsung_gate_clock *list,
__func__, list->alias);
}
- samsung_clk_add_lookup(clk, list->id);
+ samsung_clk_add_lookup(ctx, clk, list->id);
}
}
@@ -266,21 +281,21 @@ void __init samsung_clk_register_gate(struct samsung_gate_clock *list,
* tree and register it
*/
#ifdef CONFIG_OF
-void __init samsung_clk_of_register_fixed_ext(
+void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
struct samsung_fixed_rate_clock *fixed_rate_clk,
unsigned int nr_fixed_rate_clk,
struct of_device_id *clk_matches)
{
const struct of_device_id *match;
- struct device_node *np;
+ struct device_node *clk_np;
u32 freq;
- for_each_matching_node_and_match(np, clk_matches, &match) {
- if (of_property_read_u32(np, "clock-frequency", &freq))
+ for_each_matching_node_and_match(clk_np, clk_matches, &match) {
+ if (of_property_read_u32(clk_np, "clock-frequency", &freq))
continue;
- fixed_rate_clk[(u32)match->data].fixed_rate = freq;
+ fixed_rate_clk[(unsigned long)match->data].fixed_rate = freq;
}
- samsung_clk_register_fixed_rate(fixed_rate_clk, nr_fixed_rate_clk);
+ samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}
#endif
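
Note: the lookup table is now pre-filled with ERR_PTR(-ENOENT), so of_clk_src_onecell_get() hands consumers a real error for clocks that were never registered instead of a NULL pointer. A minimal sketch of the lookup path this feeds, paraphrased from the common clock framework for illustration only (not part of this patch):

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	/* unregistered slots were initialised to ERR_PTR(-ENOENT) above */
	return clk_data->clks[idx];
}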
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index c7141ba826e0..9693b80d924f 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -22,6 +22,18 @@
#include "clk-pll.h"
/**
+ * struct samsung_clk_provider: information about clock provider
+ * @reg_base: virtual address for the register base.
+ * @clk_data: holds clock related data like clk* and number of clocks.
+ * @lock: maintains exclusion between callbacks for a given clock-provider.
+ */
+struct samsung_clk_provider {
+ void __iomem *reg_base;
+ struct clk_onecell_data clk_data;
+ spinlock_t lock;
+};
+
+/**
* struct samsung_clock_alias: information about mux clock
* @id: platform specific id of the clock.
* @dev_name: name of the device to which this clock belongs.
@@ -312,40 +324,52 @@ struct samsung_pll_clock {
__PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
_lock, _con, _rtable, _alias)
-extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks);
+extern struct samsung_clk_provider *__init samsung_clk_init(
+ struct device_node *np, void __iomem *base,
+ unsigned long nr_clks);
extern void __init samsung_clk_of_register_fixed_ext(
- struct samsung_fixed_rate_clock *fixed_rate_clk,
- unsigned int nr_fixed_rate_clk,
- struct of_device_id *clk_matches);
+ struct samsung_clk_provider *ctx,
+ struct samsung_fixed_rate_clock *fixed_rate_clk,
+ unsigned int nr_fixed_rate_clk,
+ struct of_device_id *clk_matches);
-extern void samsung_clk_add_lookup(struct clk *clk, unsigned int id);
+extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
+ struct clk *clk, unsigned int id);
-extern void samsung_clk_register_alias(struct samsung_clock_alias *list,
- unsigned int nr_clk);
+extern void samsung_clk_register_alias(struct samsung_clk_provider *ctx,
+ struct samsung_clock_alias *list,
+ unsigned int nr_clk);
extern void __init samsung_clk_register_fixed_rate(
- struct samsung_fixed_rate_clock *clk_list, unsigned int nr_clk);
+ struct samsung_clk_provider *ctx,
+ struct samsung_fixed_rate_clock *clk_list,
+ unsigned int nr_clk);
extern void __init samsung_clk_register_fixed_factor(
- struct samsung_fixed_factor_clock *list, unsigned int nr_clk);
-extern void __init samsung_clk_register_mux(struct samsung_mux_clock *clk_list,
- unsigned int nr_clk);
-extern void __init samsung_clk_register_div(struct samsung_div_clock *clk_list,
- unsigned int nr_clk);
-extern void __init samsung_clk_register_gate(
- struct samsung_gate_clock *clk_list, unsigned int nr_clk);
-extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
- unsigned int nr_clk, void __iomem *base);
+ struct samsung_clk_provider *ctx,
+ struct samsung_fixed_factor_clock *list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
+ struct samsung_mux_clock *clk_list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
+ struct samsung_div_clock *clk_list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
+ struct samsung_gate_clock *clk_list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ struct samsung_pll_clock *pll_list,
+ unsigned int nr_clk, void __iomem *base);
extern unsigned long _get_rate(const char *clk_name);
extern void samsung_clk_save(void __iomem *base,
- struct samsung_clk_reg_dump *rd,
- unsigned int num_regs);
+ struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs);
extern void samsung_clk_restore(void __iomem *base,
- const struct samsung_clk_reg_dump *rd,
- unsigned int num_regs);
+ const struct samsung_clk_reg_dump *rd,
+ unsigned int num_regs);
extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
- const unsigned long *rdump,
- unsigned long nr_rdump);
+ const unsigned long *rdump,
+ unsigned long nr_rdump);
#endif /* __SAMSUNG_CLK_H */
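
With the provider context threaded through every helper, a SoC driver's init path now follows the shape below; ctx replaces the old file-scope statics. A minimal sketch, assuming a hypothetical driver with one mux and one gate table (all foo_* names and FOO_NR_CLKS are invented):

static void __init foo_clk_init(struct device_node *np)
{
	struct samsung_clk_provider *ctx;
	void __iomem *reg_base = of_iomap(np, 0);	/* hypothetical mapping */

	if (!reg_base)
		panic("%s: failed to map registers\n", __func__);

	/* allocates the context and registers the onecell provider */
	ctx = samsung_clk_init(np, reg_base, FOO_NR_CLKS);

	/* every registration helper now takes the context first */
	samsung_clk_register_mux(ctx, foo_mux_clks,
				 ARRAY_SIZE(foo_mux_clks));
	samsung_clk_register_gate(ctx, foo_gate_clks,
				  ARRAY_SIZE(foo_gate_clks));
}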
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
gate->lock = odf_lock;
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
+ if (!div) {
+ kfree(gate);
return ERR_PTR(-ENOMEM);
+ }
div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index e1769addf435..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
#define PLLDU_LFCON_SET_DIVN 600
#define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
#define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
#define PLLE_BASE_DIVN_SHIFT 8
#define PLLE_BASE_DIVN_WIDTH 8
#define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
mask(p->params->div_nmp->divp_width))
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
#define divm_max(p) (divm_mask(p))
#define divn_max(p) (divn_mask(p))
#define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
} else {
val = pll_readl_base(pll);
- val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
- (divn_mask(pll) << div_nmp->divn_shift) |
- (divp_mask(pll) << div_nmp->divp_shift));
+ val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divp_mask_shifted(pll));
- val |= ((cfg->m << div_nmp->divm_shift) |
- (cfg->n << div_nmp->divn_shift) |
- (cfg->p << div_nmp->divp_shift));
+ val |= (cfg->m << divm_shift(pll)) |
+ (cfg->n << divn_shift(pll)) |
+ (cfg->p << divp_shift(pll));
pll_writel_base(val, pll);
}
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
/* configure dividers */
val = pll_readl_base(pll);
- val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
- val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
- val |= sel.m << pll->params->div_nmp->divm_shift;
- val |= sel.n << pll->params->div_nmp->divn_shift;
- val |= sel.p << pll->params->div_nmp->divp_shift;
+ val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divm_mask_shifted(pll));
+ val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << divm_shift(pll);
+ val |= sel.n << divn_shift(pll);
+ val |= sel.p << divp_shift(pll);
val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
pll_writel_base(val, pll);
}
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
pll_writel_misc(val, pll);
val = readl(pll->clk_base + PLLE_SS_CTRL);
+ val &= ~PLLE_SS_COEFFICIENTS_MASK;
val |= PLLE_SS_DISABLE;
writel(val, pll->clk_base + PLLE_SS_CTRL);
- val |= pll_readl_base(pll);
+ val = pll_readl_base(pll);
val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
pll_writel_base(val, pll);
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
pll_writel(val, PLLE_SS_CTRL, pll);
val = pll_readl_base(pll);
- val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
- val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
- val |= sel.m << pll->params->div_nmp->divm_shift;
- val |= sel.n << pll->params->div_nmp->divn_shift;
+ val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divm_mask_shifted(pll));
+ val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << divm_shift(pll);
+ val |= sel.n << divn_shift(pll);
val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
pll_writel_base(val, pll);
udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
return clk;
}
+static struct div_nmp pll_e_nmp = {
+ .divn_shift = PLLE_BASE_DIVN_SHIFT,
+ .divn_width = PLLE_BASE_DIVN_WIDTH,
+ .divm_shift = PLLE_BASE_DIVM_SHIFT,
+ .divm_width = PLLE_BASE_DIVM_WIDTH,
+ .divp_shift = PLLE_BASE_DIVP_SHIFT,
+ .divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+ if (!pll_params->div_nmp)
+ pll_params->div_nmp = &pll_e_nmp;
+
pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
int m;
m = _pll_fixed_mdiv(pll_params, parent_rate);
- val = m << PLL_BASE_DIVM_SHIFT;
- val |= (pll_params->vco_min / parent_rate)
- << PLL_BASE_DIVN_SHIFT;
+ val = m << divm_shift(pll);
+ val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
pll_writel_base(val, pll);
}
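
The new div[mnp]_mask_shifted() helpers fix a subtle bug visible in the old clk_plle_enable() code: it cleared divm_mask(pll) | divn_mask(pll) | divp_mask(pll) without shifting, so the live field bits were never actually cleared. The safe read-modify-write shape, as a standalone sketch (names and values invented):

#define FIELD_SHIFT	8
#define FIELD_MASK	0xff	/* unshifted field mask */

static u32 field_update(u32 reg, u32 new)
{
	reg &= ~(FIELD_MASK << FIELD_SHIFT);		/* clear old field bits */
	reg |= (new & FIELD_MASK) << FIELD_SHIFT;	/* insert new value */
	return reg;
}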
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 67c8de572c50..527a43da3d33 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -105,6 +105,12 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "func_12m_clk", "func_12m_clk"),
DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"),
DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"),
+ DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+ DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"),
+ DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"),
+ DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"),
{ .node_name = NULL },
};
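
Each DT_CLK() entry binds a (device name, connection id) pair to a clock, so the EHRPWM driver can resolve its time-base clock through the generic consumer API. A consumer-side sketch; the surrounding probe code is invented for illustration:

	struct clk *tbclk;

	/* resolves via the "tbclk" entries added for these devices above */
	tbclk = devm_clk_get(&pdev->dev, "tbclk");
	if (IS_ERR(tbclk))
		return PTR_ERR(tbclk);

	clk_prepare_enable(tbclk);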
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
new file mode 100644
index 000000000000..1530c9352a76
--- /dev/null
+++ b/drivers/clk/versatile/Kconfig
@@ -0,0 +1,26 @@
+config COMMON_CLK_VERSATILE
+ bool "Clock driver for ARM Reference designs"
+ depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64
+ ---help---
+ Supports clocking on ARM Reference designs:
+ - Integrator/AP and Integrator/CP
+ - RealView PB1176, EB, PB11MP and PBX
+ - Versatile Express
+
+config CLK_SP810
+ bool "Clock driver for ARM SP810 System Controller"
+ depends on COMMON_CLK_VERSATILE
+ default y if ARCH_VEXPRESS
+ ---help---
+ Supports clock muxing (REFCLK/TIMCLK to TIMERCLKEN0-3) capabilities
+ of the ARM SP810 System Controller cell.
+
+config CLK_VEXPRESS_OSC
+ bool "Clock driver for Versatile Express OSC clock generators"
+ depends on COMMON_CLK_VERSATILE
+ depends on VEXPRESS_CONFIG
+ default y if ARCH_VEXPRESS
+ ---help---
+ Simple regmap-based driver for the clock generators on Versatile
+ Express platforms that sit behind the platform's configuration
+ infrastructure, commonly known as OSCs.
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index c16ca787170a..fd449f9b006d 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_ICST) += clk-icst.o
obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
-obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o clk-sp810.o
-obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o
+obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o
+obj-$(CONFIG_CLK_SP810) += clk-sp810.o
+obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 422391242b39..529a59c0fbfa 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -11,8 +11,6 @@
* Copyright (C) 2012 ARM Limited
*/
-#define pr_fmt(fmt) "vexpress-osc: " fmt
-
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
@@ -22,7 +20,7 @@
#include <linux/vexpress.h>
struct vexpress_osc {
- struct vexpress_config_func *func;
+ struct regmap *reg;
struct clk_hw hw;
unsigned long rate_min;
unsigned long rate_max;
@@ -36,7 +34,7 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw,
struct vexpress_osc *osc = to_vexpress_osc(hw);
u32 rate;
- vexpress_config_read(osc->func, 0, &rate);
+ regmap_read(osc->reg, 0, &rate);
return rate;
}
@@ -60,7 +58,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- return vexpress_config_write(osc->func, 0, rate);
+ return regmap_write(osc->reg, 0, rate);
}
static struct clk_ops vexpress_osc_ops = {
@@ -70,58 +68,31 @@ static struct clk_ops vexpress_osc_ops = {
};
-struct clk * __init vexpress_osc_setup(struct device *dev)
-{
- struct clk_init_data init;
- struct vexpress_osc *osc = kzalloc(sizeof(*osc), GFP_KERNEL);
-
- if (!osc)
- return NULL;
-
- osc->func = vexpress_config_func_get_by_dev(dev);
- if (!osc->func) {
- kfree(osc);
- return NULL;
- }
-
- init.name = dev_name(dev);
- init.ops = &vexpress_osc_ops;
- init.flags = CLK_IS_ROOT;
- init.num_parents = 0;
- osc->hw.init = &init;
-
- return clk_register(NULL, &osc->hw);
-}
-
-void __init vexpress_osc_of_setup(struct device_node *node)
+static int vexpress_osc_probe(struct platform_device *pdev)
{
+ struct clk_lookup *cl = pdev->dev.platform_data; /* Non-DT lookup */
struct clk_init_data init;
struct vexpress_osc *osc;
struct clk *clk;
u32 range[2];
- vexpress_sysreg_of_early_init();
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL);
if (!osc)
- return;
+ return -ENOMEM;
- osc->func = vexpress_config_func_get_by_node(node);
- if (!osc->func) {
- pr_err("Failed to obtain config func for node '%s'!\n",
- node->full_name);
- goto error;
- }
+ osc->reg = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(osc->reg))
+ return PTR_ERR(osc->reg);
- if (of_property_read_u32_array(node, "freq-range", range,
+ if (of_property_read_u32_array(pdev->dev.of_node, "freq-range", range,
ARRAY_SIZE(range)) == 0) {
osc->rate_min = range[0];
osc->rate_max = range[1];
}
- of_property_read_string(node, "clock-output-names", &init.name);
- if (!init.name)
- init.name = node->full_name;
+ if (of_property_read_string(pdev->dev.of_node, "clock-output-names",
+ &init.name) != 0)
+ init.name = dev_name(&pdev->dev);
init.ops = &vexpress_osc_ops;
init.flags = CLK_IS_ROOT;
@@ -130,20 +101,37 @@ void __init vexpress_osc_of_setup(struct device_node *node)
osc->hw.init = &init;
clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clock '%s'!\n", init.name);
- goto error;
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+
+ /* Only happens for non-DT cases */
+ if (cl) {
+ cl->clk = clk;
+ clkdev_add(cl);
}
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
+
+ return 0;
+}
- pr_debug("Registered clock '%s'\n", init.name);
+static struct of_device_id vexpress_osc_of_match[] = {
+ { .compatible = "arm,vexpress-osc", },
+ {}
+};
- return;
+static struct platform_driver vexpress_osc_driver = {
+ .driver = {
+ .name = "vexpress-osc",
+ .of_match_table = vexpress_osc_of_match,
+ },
+ .probe = vexpress_osc_probe,
+};
-error:
- if (osc->func)
- vexpress_config_func_put(osc->func);
- kfree(osc);
+static int __init vexpress_osc_init(void)
+{
+ return platform_driver_register(&vexpress_osc_driver);
}
-CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup);
+core_initcall(vexpress_osc_init);
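
Converting from CLK_OF_DECLARE() to a platform driver lets the OSC sit behind the regmap-based Versatile Express config bus; core_initcall() keeps registration early enough for consumers of these clocks. For non-DT boards the driver accepts a struct clk_lookup as platform data, roughly like this sketch (all names invented):

static struct clk_lookup osc_lookup = {
	.dev_id = "faux-consumer",	/* hypothetical consumer device */
	.con_id = "osc",
};

static struct platform_device osc_pdev = {
	.name = "vexpress-osc",
	.id = -1,
	.dev.platform_data = &osc_lookup,	/* probe() fills ->clk and
						 * calls clkdev_add() */
};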
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 96918e1f26a3..43f1acf0d1d2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -191,3 +191,14 @@ config EM_TIMER_STI
config CLKSRC_QCOM
bool
+
+config CLKSRC_VERSATILE
+ bool "ARM Versatile (Express) reference platforms clock source"
+ depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ select CLKSRC_OF
+ default y if MFD_VEXPRESS_SYSREG
+ help
+ This option enables a clock source based on the free-running
+ counter available in the "System Registers" block of the
+ ARM Versatile, RealView and Versatile Express reference
+ platforms.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 98cb6c51aa87..6f25bdffc176 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
+obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 49fbe2847c84..7a08811df9aa 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -118,11 +118,11 @@ static void ttc_set_interval(struct ttc_timer *timer,
u32 ctrl_reg;
/* Disable the counter, set the counter value and re-enable counter */
- ctrl_reg = __raw_readl(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
- __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
- __raw_writel(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
+ writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
/*
* Reset the counter (0x10) so that it starts from 0, one-shot
@@ -130,7 +130,7 @@ static void ttc_set_interval(struct ttc_timer *timer,
*/
ctrl_reg |= CNT_CNTRL_RESET;
ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
- __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
}
/**
@@ -147,7 +147,7 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
struct ttc_timer *timer = &ttce->ttc;
/* Acknowledge the interrupt and call event handler */
- __raw_readl(timer->base_addr + TTC_ISR_OFFSET);
+ readl_relaxed(timer->base_addr + TTC_ISR_OFFSET);
ttce->ce.event_handler(&ttce->ce);
@@ -163,13 +163,13 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)
{
struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
- return (cycle_t)__raw_readl(timer->base_addr +
+ return (cycle_t)readl_relaxed(timer->base_addr +
TTC_COUNT_VAL_OFFSET);
}
static u64 notrace ttc_sched_clock_read(void)
{
- return __raw_readl(ttc_sched_clock_val_reg);
+ return readl_relaxed(ttc_sched_clock_val_reg);
}
/**
@@ -211,17 +211,17 @@ static void ttc_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl_reg = __raw_readl(timer->base_addr +
+ ctrl_reg = readl_relaxed(timer->base_addr +
TTC_CNT_CNTRL_OFFSET);
ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
- __raw_writel(ctrl_reg,
+ writel_relaxed(ctrl_reg,
timer->base_addr + TTC_CNT_CNTRL_OFFSET);
break;
case CLOCK_EVT_MODE_RESUME:
- ctrl_reg = __raw_readl(timer->base_addr +
+ ctrl_reg = readl_relaxed(timer->base_addr +
TTC_CNT_CNTRL_OFFSET);
ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
- __raw_writel(ctrl_reg,
+ writel_relaxed(ctrl_reg,
timer->base_addr + TTC_CNT_CNTRL_OFFSET);
break;
}
@@ -266,8 +266,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
* of an abort.
*/
ttccs->scale_clk_ctrl_reg_old =
- __raw_readl(ttccs->ttc.base_addr +
- TTC_CLK_CNTRL_OFFSET);
+ readl_relaxed(ttccs->ttc.base_addr +
+ TTC_CLK_CNTRL_OFFSET);
psv = (ttccs->scale_clk_ctrl_reg_old &
TTC_CLK_CNTRL_PSV_MASK) >>
@@ -291,8 +291,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_DONE;
/* scale up: adjust divider now - before frequency change */
- __raw_writel(ttccs->scale_clk_ctrl_reg_new,
- ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
break;
}
case POST_RATE_CHANGE:
@@ -301,8 +301,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_OK;
/* scale down: adjust divider now - after frequency change */
- __raw_writel(ttccs->scale_clk_ctrl_reg_new,
- ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
break;
case ABORT_RATE_CHANGE:
@@ -311,8 +311,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_OK;
/* restore original register value */
- __raw_writel(ttccs->scale_clk_ctrl_reg_old,
- ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
/* fall through */
default:
return NOTIFY_DONE;
@@ -359,10 +359,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
* with no interrupt and it rolls over at 0xFFFF. Pre-scale
* it by 32 also. Let it start running now.
*/
- __raw_writel(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
- __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
+ writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
- __raw_writel(CNT_CNTRL_RESET,
+ writel_relaxed(CNT_CNTRL_RESET,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
@@ -438,10 +438,10 @@ static void __init ttc_setup_clockevent(struct clk *clk,
* is prescaled by 32 using the interval interrupt. Leave it
* disabled for now.
*/
- __raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
- __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+ writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
- __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
+ writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
@@ -490,7 +490,7 @@ static void __init ttc_timer_init(struct device_node *timer)
BUG();
}
- clksel = __raw_readl(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
+ clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
@@ -498,7 +498,7 @@ static void __init ttc_timer_init(struct device_node *timer)
BUG();
}
- clksel = __raw_readl(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
+ clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
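
The __raw_readl()/__raw_writel() accessors perform native-endian accesses; readl_relaxed()/writel_relaxed() keep the same barrier-free semantics but always byte-swap to little-endian, so the driver also behaves on big-endian kernels. A minimal before/after sketch (the register offset is invented):

static u32 example_ack(void __iomem *base)
{
	/* before: __raw_readl(base + 0x14) -- native endianness */
	/* after: explicit little-endian, still no memory barrier */
	u32 v = readl_relaxed(base + 0x14);

	writel_relaxed(v, base + 0x14);
	return v;
}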
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index acf5a329d538..8d6420013a04 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -24,6 +24,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
@@ -192,12 +193,19 @@ struct clocksource mct_frc = {
.resume = exynos4_frc_resume,
};
+static u64 notrace exynos4_read_sched_clock(void)
+{
+ return exynos4_frc_read(&mct_frc);
+}
+
static void __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start(0, 0);
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
+
+ sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
}
static void exynos4_mct_comp0_stop(void)
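
Registering the MCT's free-running counter as the sched_clock source gives the scheduler a cheap monotonic 64-bit timestamp. The read callback must be notrace (it can be called from tracing paths), and sched_clock_register() takes the counter width in bits plus its rate, as above. A hypothetical registration for some other 32-bit counter:

static void __iomem *foo_counter;	/* hypothetical counter register */

static u64 notrace foo_sched_clock_read(void)
{
	return readl_relaxed(foo_counter);
}

static void __init foo_sched_clock_init(unsigned long rate)
{
	/* 32 = counter width in bits; rate = counter frequency in Hz */
	sched_clock_register(foo_sched_clock_read, 32, rate);
}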
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 0b1836a6c539..bc8d025ce861 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -11,40 +11,93 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct sh_cmt_device;
+
+/*
+ * The CMT comes in 5 different identified flavours, depending not only on the
+ * SoC but also on the particular instance. The following table lists the main
+ * characteristics of those flavours.
+ *
+ * 16B 32B 32B-F 48B 48B-2
+ * -----------------------------------------------------------------------------
+ * Channels 2 1/4 1 6 2/8
+ * Control Width 16 16 16 16 32
+ * Counter Width 16 32 32 32/48 32/48
+ * Shared Start/Stop Y Y Y Y N
+ *
+ * The 48-bit gen2 version has a per-channel start/stop register located in the
+ * channel registers block. All other versions have a shared start/stop register
+ * located in the global space.
+ *
+ * Channels are indexed from 0 to N-1 in the documentation. The channel index
+ * determines the start/stop bit position in the control register and the channel
+ * registers block address. Some CMT instances have a subset of channels
+ * available, in which case the index in the documentation doesn't match the
+ * "real" index as implemented in hardware. This is for instance the case with
+ * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
+ * in the documentation but using start/stop bit 5 and having its registers
+ * block at 0x60.
+ *
+ * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
+ * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
+ */
+
+enum sh_cmt_model {
+ SH_CMT_16BIT,
+ SH_CMT_32BIT,
+ SH_CMT_32BIT_FAST,
+ SH_CMT_48BIT,
+ SH_CMT_48BIT_GEN2,
+};
+
+struct sh_cmt_info {
+ enum sh_cmt_model model;
-struct sh_cmt_priv {
- void __iomem *mapbase;
- void __iomem *mapbase_str;
- struct clk *clk;
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
unsigned long clear_bits;
- struct irqaction irqaction;
- struct platform_device *pdev;
+ /* callbacks for CMSTR and CMCSR access */
+ unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ void (*write_control)(void __iomem *base, unsigned long offs,
+ unsigned long value);
+
+ /* callbacks for CMCNT and CMCOR access */
+ unsigned long (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs,
+ unsigned long value);
+};
+
+struct sh_cmt_channel {
+ struct sh_cmt_device *cmt;
+
+ unsigned int index; /* Index in the documentation */
+ unsigned int hwidx; /* Real hardware index */
+
+ void __iomem *iostart;
+ void __iomem *ioctrl;
+
+ unsigned int timer_bit;
unsigned long flags;
unsigned long match_value;
unsigned long next_match_value;
@@ -55,38 +108,52 @@ struct sh_cmt_priv {
struct clocksource cs;
unsigned long total_cycles;
bool cs_enabled;
+};
- /* callbacks for CMSTR and CMCSR access */
- unsigned long (*read_control)(void __iomem *base, unsigned long offs);
- void (*write_control)(void __iomem *base, unsigned long offs,
- unsigned long value);
+struct sh_cmt_device {
+ struct platform_device *pdev;
- /* callbacks for CMCNT and CMCOR access */
- unsigned long (*read_count)(void __iomem *base, unsigned long offs);
- void (*write_count)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ const struct sh_cmt_info *info;
+ bool legacy;
+
+ void __iomem *mapbase_ch;
+ void __iomem *mapbase;
+ struct clk *clk;
+
+ struct sh_cmt_channel *channels;
+ unsigned int num_channels;
+
+ bool has_clockevent;
+ bool has_clocksource;
};
-/* Examples of supported CMT timer register layouts and I/O access widths:
- *
- * "16-bit counter and 16-bit control" as found on sh7263:
- * CMSTR 0xfffec000 16-bit
- * CMCSR 0xfffec002 16-bit
- * CMCNT 0xfffec004 16-bit
- * CMCOR 0xfffec006 16-bit
- *
- * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
- * CMSTR 0xffca0000 16-bit
- * CMCSR 0xffca0060 16-bit
- * CMCNT 0xffca0064 32-bit
- * CMCOR 0xffca0068 32-bit
- *
- * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
- * CMSTR 0xffca0500 32-bit
- * CMCSR 0xffca0510 32-bit
- * CMCNT 0xffca0514 32-bit
- * CMCOR 0xffca0518 32-bit
- */
+#define SH_CMT16_CMCSR_CMF (1 << 7)
+#define SH_CMT16_CMCSR_CMIE (1 << 6)
+#define SH_CMT16_CMCSR_CKS8 (0 << 0)
+#define SH_CMT16_CMCSR_CKS32 (1 << 0)
+#define SH_CMT16_CMCSR_CKS128 (2 << 0)
+#define SH_CMT16_CMCSR_CKS512 (3 << 0)
+#define SH_CMT16_CMCSR_CKS_MASK (3 << 0)
+
+#define SH_CMT32_CMCSR_CMF (1 << 15)
+#define SH_CMT32_CMCSR_OVF (1 << 14)
+#define SH_CMT32_CMCSR_WRFLG (1 << 13)
+#define SH_CMT32_CMCSR_STTF (1 << 12)
+#define SH_CMT32_CMCSR_STPF (1 << 11)
+#define SH_CMT32_CMCSR_SSIE (1 << 10)
+#define SH_CMT32_CMCSR_CMS (1 << 9)
+#define SH_CMT32_CMCSR_CMM (1 << 8)
+#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
+#define SH_CMT32_CMCSR_CMR_NONE (0 << 4)
+#define SH_CMT32_CMCSR_CMR_DMA (1 << 4)
+#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)
+#define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
+#define SH_CMT32_CMCSR_DBGIVD (1 << 3)
+#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
+#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
@@ -110,64 +177,123 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,
iowrite32(value, base + (offs << 2));
}
+static const struct sh_cmt_info sh_cmt_info[] = {
+ [SH_CMT_16BIT] = {
+ .model = SH_CMT_16BIT,
+ .width = 16,
+ .overflow_bit = SH_CMT16_CMCSR_CMF,
+ .clear_bits = ~SH_CMT16_CMCSR_CMF,
+ .read_control = sh_cmt_read16,
+ .write_control = sh_cmt_write16,
+ .read_count = sh_cmt_read16,
+ .write_count = sh_cmt_write16,
+ },
+ [SH_CMT_32BIT] = {
+ .model = SH_CMT_32BIT,
+ .width = 32,
+ .overflow_bit = SH_CMT32_CMCSR_CMF,
+ .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+ .read_control = sh_cmt_read16,
+ .write_control = sh_cmt_write16,
+ .read_count = sh_cmt_read32,
+ .write_count = sh_cmt_write32,
+ },
+ [SH_CMT_32BIT_FAST] = {
+ .model = SH_CMT_32BIT_FAST,
+ .width = 32,
+ .overflow_bit = SH_CMT32_CMCSR_CMF,
+ .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+ .read_control = sh_cmt_read16,
+ .write_control = sh_cmt_write16,
+ .read_count = sh_cmt_read32,
+ .write_count = sh_cmt_write32,
+ },
+ [SH_CMT_48BIT] = {
+ .model = SH_CMT_48BIT,
+ .width = 32,
+ .overflow_bit = SH_CMT32_CMCSR_CMF,
+ .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+ .read_control = sh_cmt_read32,
+ .write_control = sh_cmt_write32,
+ .read_count = sh_cmt_read32,
+ .write_count = sh_cmt_write32,
+ },
+ [SH_CMT_48BIT_GEN2] = {
+ .model = SH_CMT_48BIT_GEN2,
+ .width = 32,
+ .overflow_bit = SH_CMT32_CMCSR_CMF,
+ .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+ .read_control = sh_cmt_read32,
+ .write_control = sh_cmt_write32,
+ .read_count = sh_cmt_read32,
+ .write_count = sh_cmt_write32,
+ },
+};
+
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
+static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
- return p->read_control(p->mapbase_str, 0);
+ if (ch->iostart)
+ return ch->cmt->info->read_control(ch->iostart, 0);
+ else
+ return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
+static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
+ unsigned long value)
{
- return p->read_control(p->mapbase, CMCSR);
+ if (ch->iostart)
+ ch->cmt->info->write_control(ch->iostart, 0, value);
+ else
+ ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}
-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
+static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
- return p->read_count(p->mapbase, CMCNT);
+ return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
-static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
unsigned long value)
{
- p->write_control(p->mapbase_str, 0, value);
+ ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}
-static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
- unsigned long value)
+static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
- p->write_control(p->mapbase, CMCSR, value);
+ return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
unsigned long value)
{
- p->write_count(p->mapbase, CMCNT, value);
+ ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}
-static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
unsigned long value)
{
- p->write_count(p->mapbase, CMCOR, value);
+ ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
-static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
+static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
int *has_wrapped)
{
unsigned long v1, v2, v3;
int o1, o2;
- o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
+ o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
- v1 = sh_cmt_read_cmcnt(p);
- v2 = sh_cmt_read_cmcnt(p);
- v3 = sh_cmt_read_cmcnt(p);
- o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
+ v1 = sh_cmt_read_cmcnt(ch);
+ v2 = sh_cmt_read_cmcnt(ch);
+ v3 = sh_cmt_read_cmcnt(ch);
+ o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
@@ -177,52 +303,56 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
-static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
+static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&sh_cmt_lock, flags);
- value = sh_cmt_read_cmstr(p);
+ value = sh_cmt_read_cmstr(ch);
if (start)
- value |= 1 << cfg->timer_bit;
+ value |= 1 << ch->timer_bit;
else
- value &= ~(1 << cfg->timer_bit);
+ value &= ~(1 << ch->timer_bit);
- sh_cmt_write_cmstr(p, value);
+ sh_cmt_write_cmstr(ch, value);
raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
-static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
+static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
{
int k, ret;
- pm_runtime_get_sync(&p->pdev->dev);
- dev_pm_syscore_device(&p->pdev->dev, true);
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_enable(ch->cmt->clk);
if (ret) {
- dev_err(&p->pdev->dev, "cannot enable clock\n");
+ dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
+ ch->index);
goto err0;
}
/* make sure channel is disabled */
- sh_cmt_start_stop_ch(p, 0);
+ sh_cmt_start_stop_ch(ch, 0);
/* configure channel, periodic mode and maximum timeout */
- if (p->width == 16) {
- *rate = clk_get_rate(p->clk) / 512;
- sh_cmt_write_cmcsr(p, 0x43);
+ if (ch->cmt->info->width == 16) {
+ *rate = clk_get_rate(ch->cmt->clk) / 512;
+ sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
+ SH_CMT16_CMCSR_CKS512);
} else {
- *rate = clk_get_rate(p->clk) / 8;
- sh_cmt_write_cmcsr(p, 0x01a4);
+ *rate = clk_get_rate(ch->cmt->clk) / 8;
+ sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
+ SH_CMT32_CMCSR_CMTOUT_IE |
+ SH_CMT32_CMCSR_CMR_IRQ |
+ SH_CMT32_CMCSR_CKS_RCLK8);
}
- sh_cmt_write_cmcor(p, 0xffffffff);
- sh_cmt_write_cmcnt(p, 0);
+ sh_cmt_write_cmcor(ch, 0xffffffff);
+ sh_cmt_write_cmcnt(ch, 0);
/*
* According to the sh73a0 user's manual, as CMCNT can be operated
@@ -236,41 +366,42 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
* take RCLKx2 at maximum.
*/
for (k = 0; k < 100; k++) {
- if (!sh_cmt_read_cmcnt(p))
+ if (!sh_cmt_read_cmcnt(ch))
break;
udelay(1);
}
- if (sh_cmt_read_cmcnt(p)) {
- dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
+ if (sh_cmt_read_cmcnt(ch)) {
+ dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
+ ch->index);
ret = -ETIMEDOUT;
goto err1;
}
/* enable channel */
- sh_cmt_start_stop_ch(p, 1);
+ sh_cmt_start_stop_ch(ch, 1);
return 0;
err1:
/* stop clock */
- clk_disable(p->clk);
+ clk_disable(ch->cmt->clk);
err0:
return ret;
}
-static void sh_cmt_disable(struct sh_cmt_priv *p)
+static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
/* disable channel */
- sh_cmt_start_stop_ch(p, 0);
+ sh_cmt_start_stop_ch(ch, 0);
/* disable interrupts in CMT block */
- sh_cmt_write_cmcsr(p, 0);
+ sh_cmt_write_cmcsr(ch, 0);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable(ch->cmt->clk);
- dev_pm_syscore_device(&p->pdev->dev, false);
- pm_runtime_put(&p->pdev->dev);
+ dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
+ pm_runtime_put(&ch->cmt->pdev->dev);
}
/* private flags */
@@ -280,24 +411,24 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
-static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
+static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
unsigned long new_match;
- unsigned long value = p->next_match_value;
+ unsigned long value = ch->next_match_value;
unsigned long delay = 0;
unsigned long now = 0;
int has_wrapped;
- now = sh_cmt_get_counter(p, &has_wrapped);
- p->flags |= FLAG_REPROGRAM; /* force reprogram */
+ now = sh_cmt_get_counter(ch, &has_wrapped);
+ ch->flags |= FLAG_REPROGRAM; /* force reprogram */
if (has_wrapped) {
/* we're competing with the interrupt handler.
* -> let the interrupt handler reprogram the timer.
* -> interrupt number two handles the event.
*/
- p->flags |= FLAG_SKIPEVENT;
+ ch->flags |= FLAG_SKIPEVENT;
return;
}
@@ -309,20 +440,20 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
* but don't save the new match value yet.
*/
new_match = now + value + delay;
- if (new_match > p->max_match_value)
- new_match = p->max_match_value;
+ if (new_match > ch->max_match_value)
+ new_match = ch->max_match_value;
- sh_cmt_write_cmcor(p, new_match);
+ sh_cmt_write_cmcor(ch, new_match);
- now = sh_cmt_get_counter(p, &has_wrapped);
- if (has_wrapped && (new_match > p->match_value)) {
+ now = sh_cmt_get_counter(ch, &has_wrapped);
+ if (has_wrapped && (new_match > ch->match_value)) {
/* we are changing to a greater match value,
* so this wrap must be caused by the counter
* matching the old value.
* -> first interrupt reprograms the timer.
* -> interrupt number two handles the event.
*/
- p->flags |= FLAG_SKIPEVENT;
+ ch->flags |= FLAG_SKIPEVENT;
break;
}
@@ -333,7 +464,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
* -> save programmed match value.
* -> let isr handle the event.
*/
- p->match_value = new_match;
+ ch->match_value = new_match;
break;
}
@@ -344,7 +475,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
* -> save programmed match value.
* -> let isr handle the event.
*/
- p->match_value = new_match;
+ ch->match_value = new_match;
break;
}
@@ -360,138 +491,141 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
delay = 1;
if (!delay)
- dev_warn(&p->pdev->dev, "too long delay\n");
+ dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
+ ch->index);
} while (delay);
}
-static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
- if (delta > p->max_match_value)
- dev_warn(&p->pdev->dev, "delta out of range\n");
+ if (delta > ch->max_match_value)
+ dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
+ ch->index);
- p->next_match_value = delta;
- sh_cmt_clock_event_program_verify(p, 0);
+ ch->next_match_value = delta;
+ sh_cmt_clock_event_program_verify(ch, 0);
}
-static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
unsigned long flags;
- raw_spin_lock_irqsave(&p->lock, flags);
- __sh_cmt_set_next(p, delta);
- raw_spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_lock_irqsave(&ch->lock, flags);
+ __sh_cmt_set_next(ch, delta);
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
- struct sh_cmt_priv *p = dev_id;
+ struct sh_cmt_channel *ch = dev_id;
/* clear flags */
- sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
+ sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
+ ch->cmt->info->clear_bits);
/* update clock source counter to begin with if enabled
* the wrap flag should be cleared by the timer specific
* isr before we end up here.
*/
- if (p->flags & FLAG_CLOCKSOURCE)
- p->total_cycles += p->match_value + 1;
+ if (ch->flags & FLAG_CLOCKSOURCE)
+ ch->total_cycles += ch->match_value + 1;
- if (!(p->flags & FLAG_REPROGRAM))
- p->next_match_value = p->max_match_value;
+ if (!(ch->flags & FLAG_REPROGRAM))
+ ch->next_match_value = ch->max_match_value;
- p->flags |= FLAG_IRQCONTEXT;
+ ch->flags |= FLAG_IRQCONTEXT;
- if (p->flags & FLAG_CLOCKEVENT) {
- if (!(p->flags & FLAG_SKIPEVENT)) {
- if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
- p->next_match_value = p->max_match_value;
- p->flags |= FLAG_REPROGRAM;
+ if (ch->flags & FLAG_CLOCKEVENT) {
+ if (!(ch->flags & FLAG_SKIPEVENT)) {
+ if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
+ ch->next_match_value = ch->max_match_value;
+ ch->flags |= FLAG_REPROGRAM;
}
- p->ced.event_handler(&p->ced);
+ ch->ced.event_handler(&ch->ced);
}
}
- p->flags &= ~FLAG_SKIPEVENT;
+ ch->flags &= ~FLAG_SKIPEVENT;
- if (p->flags & FLAG_REPROGRAM) {
- p->flags &= ~FLAG_REPROGRAM;
- sh_cmt_clock_event_program_verify(p, 1);
+ if (ch->flags & FLAG_REPROGRAM) {
+ ch->flags &= ~FLAG_REPROGRAM;
+ sh_cmt_clock_event_program_verify(ch, 1);
- if (p->flags & FLAG_CLOCKEVENT)
- if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
- || (p->match_value == p->next_match_value))
- p->flags &= ~FLAG_REPROGRAM;
+ if (ch->flags & FLAG_CLOCKEVENT)
+ if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
+ || (ch->match_value == ch->next_match_value))
+ ch->flags &= ~FLAG_REPROGRAM;
}
- p->flags &= ~FLAG_IRQCONTEXT;
+ ch->flags &= ~FLAG_IRQCONTEXT;
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
+static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
int ret = 0;
unsigned long flags;
- raw_spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
- ret = sh_cmt_enable(p, &p->rate);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ ret = sh_cmt_enable(ch, &ch->rate);
if (ret)
goto out;
- p->flags |= flag;
+ ch->flags |= flag;
/* setup timeout if no clockevent */
- if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
- __sh_cmt_set_next(p, p->max_match_value);
+ if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
+ __sh_cmt_set_next(ch, ch->max_match_value);
out:
- raw_spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
+static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
unsigned long flags;
unsigned long f;
- raw_spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&ch->lock, flags);
- f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- p->flags &= ~flag;
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+ ch->flags &= ~flag;
- if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
- sh_cmt_disable(p);
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
- __sh_cmt_set_next(p, p->max_match_value);
+ if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ __sh_cmt_set_next(ch, ch->max_match_value);
- raw_spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
}
-static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
- return container_of(cs, struct sh_cmt_priv, cs);
+ return container_of(cs, struct sh_cmt_channel, cs);
}
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
- struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
unsigned long flags, raw;
unsigned long value;
int has_wrapped;
- raw_spin_lock_irqsave(&p->lock, flags);
- value = p->total_cycles;
- raw = sh_cmt_get_counter(p, &has_wrapped);
+ raw_spin_lock_irqsave(&ch->lock, flags);
+ value = ch->total_cycles;
+ raw = sh_cmt_get_counter(ch, &has_wrapped);
if (unlikely(has_wrapped))
- raw += p->match_value + 1;
- raw_spin_unlock_irqrestore(&p->lock, flags);
+ raw += ch->match_value + 1;
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
return value + raw;
}
@@ -499,53 +633,53 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
int ret;
- struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- WARN_ON(p->cs_enabled);
+ WARN_ON(ch->cs_enabled);
- p->total_cycles = 0;
+ ch->total_cycles = 0;
- ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
if (!ret) {
- __clocksource_updatefreq_hz(cs, p->rate);
- p->cs_enabled = true;
+ __clocksource_updatefreq_hz(cs, ch->rate);
+ ch->cs_enabled = true;
}
return ret;
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
- struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- WARN_ON(!p->cs_enabled);
+ WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(p, FLAG_CLOCKSOURCE);
- p->cs_enabled = false;
+ sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ ch->cs_enabled = false;
}
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
- struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- sh_cmt_stop(p, FLAG_CLOCKSOURCE);
- pm_genpd_syscore_poweroff(&p->pdev->dev);
+ sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
- struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- pm_genpd_syscore_poweron(&p->pdev->dev);
- sh_cmt_start(p, FLAG_CLOCKSOURCE);
+ pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
+ sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
-static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
- char *name, unsigned long rating)
+static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
+ const char *name)
{
- struct clocksource *cs = &p->cs;
+ struct clocksource *cs = &ch->cs;
cs->name = name;
- cs->rating = rating;
+ cs->rating = 125;
cs->read = sh_cmt_clocksource_read;
cs->enable = sh_cmt_clocksource_enable;
cs->disable = sh_cmt_clocksource_disable;
@@ -554,47 +688,48 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- dev_info(&p->pdev->dev, "used as clock source\n");
+ dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
+ ch->index);
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
-static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
+static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
- return container_of(ced, struct sh_cmt_priv, ced);
+ return container_of(ced, struct sh_cmt_channel, ced);
}
-static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
+static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- struct clock_event_device *ced = &p->ced;
+ struct clock_event_device *ced = &ch->ced;
- sh_cmt_start(p, FLAG_CLOCKEVENT);
+ sh_cmt_start(ch, FLAG_CLOCKEVENT);
/* TODO: calculate good shift from rate and counter bit width */
ced->shift = 32;
- ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
- ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
+ ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
+ ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
if (periodic)
- sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
+ sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
else
- sh_cmt_set_next(p, p->max_match_value);
+ sh_cmt_set_next(ch, ch->max_match_value);
}
static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
- struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
- sh_cmt_stop(p, FLAG_CLOCKEVENT);
+ sh_cmt_stop(ch, FLAG_CLOCKEVENT);
break;
default:
break;
@@ -602,16 +737,18 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&p->pdev->dev, "used for periodic clock events\n");
- sh_cmt_clock_event_start(p, 1);
+ dev_info(&ch->cmt->pdev->dev,
+ "ch%u: used for periodic clock events\n", ch->index);
+ sh_cmt_clock_event_start(ch, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&p->pdev->dev, "used for oneshot clock events\n");
- sh_cmt_clock_event_start(p, 0);
+ dev_info(&ch->cmt->pdev->dev,
+ "ch%u: used for oneshot clock events\n", ch->index);
+ sh_cmt_clock_event_start(ch, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
- sh_cmt_stop(p, FLAG_CLOCKEVENT);
+ sh_cmt_stop(ch, FLAG_CLOCKEVENT);
break;
default:
break;
@@ -621,196 +758,341 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
- struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
- if (likely(p->flags & FLAG_IRQCONTEXT))
- p->next_match_value = delta - 1;
+ if (likely(ch->flags & FLAG_IRQCONTEXT))
+ ch->next_match_value = delta - 1;
else
- sh_cmt_set_next(p, delta - 1);
+ sh_cmt_set_next(ch, delta - 1);
return 0;
}
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
- struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- pm_genpd_syscore_poweroff(&p->pdev->dev);
- clk_unprepare(p->clk);
+ pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
+ clk_unprepare(ch->cmt->clk);
}
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
- struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- clk_prepare(p->clk);
- pm_genpd_syscore_poweron(&p->pdev->dev);
+ clk_prepare(ch->cmt->clk);
+ pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}
-static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
- char *name, unsigned long rating)
+static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
+ const char *name)
{
- struct clock_event_device *ced = &p->ced;
+ struct clock_event_device *ced = &ch->ced;
+ int irq;
+ int ret;
- memset(ced, 0, sizeof(*ced));
+ irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
+ if (irq < 0) {
+ dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
+ ch->index);
+ return irq;
+ }
+
+ ret = request_irq(irq, sh_cmt_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&ch->cmt->pdev->dev), ch);
+ if (ret) {
+ dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
+ ch->index, irq);
+ return ret;
+ }
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
- ced->rating = rating;
- ced->cpumask = cpumask_of(0);
+ ced->rating = 125;
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
ced->suspend = sh_cmt_clock_event_suspend;
ced->resume = sh_cmt_clock_event_resume;
- dev_info(&p->pdev->dev, "used for clock events\n");
+ dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
+ ch->index);
clockevents_register_device(ced);
+
+ return 0;
}
-static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
- unsigned long clockevent_rating,
- unsigned long clocksource_rating)
+static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
+ bool clockevent, bool clocksource)
{
- if (clockevent_rating)
- sh_cmt_register_clockevent(p, name, clockevent_rating);
+ int ret;
+
+ if (clockevent) {
+ ch->cmt->has_clockevent = true;
+ ret = sh_cmt_register_clockevent(ch, name);
+ if (ret < 0)
+ return ret;
+ }
- if (clocksource_rating)
- sh_cmt_register_clocksource(p, name, clocksource_rating);
+ if (clocksource) {
+ ch->cmt->has_clocksource = true;
+ sh_cmt_register_clocksource(ch, name);
+ }
return 0;
}
-static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
+static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
+ unsigned int hwidx, bool clockevent,
+ bool clocksource, struct sh_cmt_device *cmt)
{
- struct sh_timer_config *cfg = pdev->dev.platform_data;
- struct resource *res, *res2;
- int irq, ret;
- ret = -ENXIO;
+ int ret;
- memset(p, 0, sizeof(*p));
- p->pdev = pdev;
+ /* Skip unused channels. */
+ if (!clockevent && !clocksource)
+ return 0;
- if (!cfg) {
- dev_err(&p->pdev->dev, "missing platform data\n");
- goto err0;
+ ch->cmt = cmt;
+ ch->index = index;
+ ch->hwidx = hwidx;
+
+ /*
+ * Compute the address of the channel control register block. For the
+ * timers with a per-channel start/stop register, compute its address
+ * as well.
+ *
+ * For legacy configuration the address has been mapped explicitly.
+ */
+ if (cmt->legacy) {
+ ch->ioctrl = cmt->mapbase_ch;
+ } else {
+ switch (cmt->info->model) {
+ case SH_CMT_16BIT:
+ ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
+ break;
+ case SH_CMT_32BIT:
+ case SH_CMT_48BIT:
+ ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
+ break;
+ case SH_CMT_32BIT_FAST:
+ /*
+ * The 32-bit "fast" timer has a single channel at hwidx
+ * 5 but is located at offset 0x40 instead of 0x60 for
+ * some reason.
+ */
+ ch->ioctrl = cmt->mapbase + 0x40;
+ break;
+ case SH_CMT_48BIT_GEN2:
+ ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
+ ch->ioctrl = ch->iostart + 0x10;
+ break;
+ }
}
- res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&p->pdev->dev, "failed to get I/O memory\n");
- goto err0;
+ if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
+ ch->max_match_value = ~0;
+ else
+ ch->max_match_value = (1 << cmt->info->width) - 1;
+
+ ch->match_value = ch->max_match_value;
+ raw_spin_lock_init(&ch->lock);
+
+ if (cmt->legacy) {
+ ch->timer_bit = ch->hwidx;
+ } else {
+ ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
+ ? 0 : ch->hwidx;
}
- /* optional resource for the shared timer start/stop register */
- res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);
+ ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
+ clockevent, clocksource);
+ if (ret) {
+ dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
+ ch->index);
+ return ret;
+ }
+ ch->cs_enabled = false;
- irq = platform_get_irq(p->pdev, 0);
- if (irq < 0) {
- dev_err(&p->pdev->dev, "failed to get irq\n");
- goto err0;
+ return 0;
+}
+
+static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
+{
+ struct resource *mem;
+
+ mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
+ return -ENXIO;
}
- /* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
- if (p->mapbase == NULL) {
- dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
- goto err0;
+ cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+ if (cmt->mapbase == NULL) {
+ dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
+ return -ENXIO;
}
- /* map second resource for CMSTR */
- p->mapbase_str = ioremap_nocache(res2 ? res2->start :
- res->start - cfg->channel_offset,
- res2 ? resource_size(res2) : 2);
- if (p->mapbase_str == NULL) {
- dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
- goto err1;
+ return 0;
+}
+
+static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt)
+{
+ struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
+ struct resource *res, *res2;
+
+ /* map memory, let mapbase_ch point to our channel */
+ res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
+ return -ENXIO;
}
- /* request irq using setup_irq() (too early for request_irq()) */
- p->irqaction.name = dev_name(&p->pdev->dev);
- p->irqaction.handler = sh_cmt_interrupt;
- p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
-
- /* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, "cmt_fck");
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err2;
+ cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
+ if (cmt->mapbase_ch == NULL) {
+ dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
+ return -ENXIO;
}
- ret = clk_prepare(p->clk);
- if (ret < 0)
- goto err3;
+ /* optional resource for the shared timer start/stop register */
+ res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
- if (res2 && (resource_size(res2) == 4)) {
- /* assume both CMSTR and CMCSR to be 32-bit */
- p->read_control = sh_cmt_read32;
- p->write_control = sh_cmt_write32;
- } else {
- p->read_control = sh_cmt_read16;
- p->write_control = sh_cmt_write16;
+ /* map second resource for CMSTR */
+ cmt->mapbase = ioremap_nocache(res2 ? res2->start :
+ res->start - cfg->channel_offset,
+ res2 ? resource_size(res2) : 2);
+ if (cmt->mapbase == NULL) {
+ dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
+ iounmap(cmt->mapbase_ch);
+ return -ENXIO;
}
- if (resource_size(res) == 6) {
- p->width = 16;
- p->read_count = sh_cmt_read16;
- p->write_count = sh_cmt_write16;
- p->overflow_bit = 0x80;
- p->clear_bits = ~0x80;
- } else {
- p->width = 32;
- p->read_count = sh_cmt_read32;
- p->write_count = sh_cmt_write32;
- p->overflow_bit = 0x8000;
- p->clear_bits = ~0xc000;
+ /* identify the model based on the resources */
+ if (resource_size(res) == 6)
+ cmt->info = &sh_cmt_info[SH_CMT_16BIT];
+ else if (res2 && (resource_size(res2) == 4))
+ cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
+ else
+ cmt->info = &sh_cmt_info[SH_CMT_32BIT];
+
+ return 0;
+}
+
+static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt)
+{
+ iounmap(cmt->mapbase);
+ if (cmt->mapbase_ch)
+ iounmap(cmt->mapbase_ch);
+}
+
+static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ const struct platform_device_id *id = pdev->id_entry;
+ unsigned int hw_channels;
+ int ret;
+
+ memset(cmt, 0, sizeof(*cmt));
+ cmt->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&cmt->pdev->dev, "missing platform data\n");
+ return -ENXIO;
}
- if (p->width == (sizeof(p->max_match_value) * 8))
- p->max_match_value = ~0;
+ cmt->info = (const struct sh_cmt_info *)id->driver_data;
+ cmt->legacy = cmt->info ? false : true;
+
+ /* Get hold of clock. */
+ cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck");
+ if (IS_ERR(cmt->clk)) {
+ dev_err(&cmt->pdev->dev, "cannot get clock\n");
+ return PTR_ERR(cmt->clk);
+ }
+
+ ret = clk_prepare(cmt->clk);
+ if (ret < 0)
+ goto err_clk_put;
+
+ /*
+ * Map the memory resource(s). We need to support both the legacy
+ * platform device configuration (with one device per channel) and the
+ * new version (with multiple channels per device).
+ */
+ if (cmt->legacy)
+ ret = sh_cmt_map_memory_legacy(cmt);
else
- p->max_match_value = (1 << p->width) - 1;
+ ret = sh_cmt_map_memory(cmt);
- p->match_value = p->max_match_value;
- raw_spin_lock_init(&p->lock);
+ if (ret < 0)
+ goto err_clk_unprepare;
- ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating,
- cfg->clocksource_rating);
- if (ret) {
- dev_err(&p->pdev->dev, "registration failed\n");
- goto err4;
+ /* Allocate and setup the channels. */
+ if (cmt->legacy) {
+ cmt->num_channels = 1;
+ hw_channels = 0;
+ } else {
+ cmt->num_channels = hweight8(cfg->channels_mask);
+ hw_channels = cfg->channels_mask;
}
- p->cs_enabled = false;
- ret = setup_irq(irq, &p->irqaction);
- if (ret) {
- dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err4;
+ cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
+ GFP_KERNEL);
+ if (cmt->channels == NULL) {
+ ret = -ENOMEM;
+ goto err_unmap;
}
- platform_set_drvdata(pdev, p);
+ if (cmt->legacy) {
+ ret = sh_cmt_setup_channel(&cmt->channels[0],
+ cfg->timer_bit, cfg->timer_bit,
+ cfg->clockevent_rating != 0,
+ cfg->clocksource_rating != 0, cmt);
+ if (ret < 0)
+ goto err_unmap;
+ } else {
+ unsigned int mask = hw_channels;
+ unsigned int i;
+
+ /*
+ * Use the first channel as a clock event device and the second
+ * channel as a clock source. If only one channel is available
+ * use it for both.
+ */
+ for (i = 0; i < cmt->num_channels; ++i) {
+ unsigned int hwidx = ffs(mask) - 1;
+ bool clocksource = i == 1 || cmt->num_channels == 1;
+ bool clockevent = i == 0;
+
+ ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
+ clockevent, clocksource,
+ cmt);
+ if (ret < 0)
+ goto err_unmap;
+
+ mask &= ~(1 << hwidx);
+ }
+ }
+
+ platform_set_drvdata(pdev, cmt);
return 0;
-err4:
- clk_unprepare(p->clk);
-err3:
- clk_put(p->clk);
-err2:
- iounmap(p->mapbase_str);
-err1:
- iounmap(p->mapbase);
-err0:
+
+err_unmap:
+ kfree(cmt->channels);
+ sh_cmt_unmap_memory(cmt);
+err_clk_unprepare:
+ clk_unprepare(cmt->clk);
+err_clk_put:
+ clk_put(cmt->clk);
return ret;
}
static int sh_cmt_probe(struct platform_device *pdev)
{
- struct sh_cmt_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev)) {
@@ -818,20 +1100,20 @@ static int sh_cmt_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
}
- if (p) {
+ if (cmt) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
+ cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
+ if (cmt == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
- ret = sh_cmt_setup(p, pdev);
+ ret = sh_cmt_setup(cmt, pdev);
if (ret) {
- kfree(p);
+ kfree(cmt);
pm_runtime_idle(&pdev->dev);
return ret;
}
@@ -839,7 +1121,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
return 0;
out:
- if (cfg->clockevent_rating || cfg->clocksource_rating)
+ if (cmt->has_clockevent || cmt->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
@@ -852,12 +1134,24 @@ static int sh_cmt_remove(struct platform_device *pdev)
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
+static const struct platform_device_id sh_cmt_id_table[] = {
+ { "sh_cmt", 0 },
+ { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
+ { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
+ { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
+ { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
+ { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
+
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.remove = sh_cmt_remove,
.driver = {
.name = "sh_cmt",
- }
+ },
+ .id_table = sh_cmt_id_table,
};
static int __init sh_cmt_init(void)
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index e30d76e0a6fa..f2c1c36139e1 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -11,37 +11,48 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct sh_mtu2_device;
+
+struct sh_mtu2_channel {
+ struct sh_mtu2_device *mtu;
+ unsigned int index;
+
+ void __iomem *base;
+ int irq;
+
+ struct clock_event_device ced;
+};
+
+struct sh_mtu2_device {
+ struct platform_device *pdev;
-struct sh_mtu2_priv {
void __iomem *mapbase;
struct clk *clk;
- struct irqaction irqaction;
- struct platform_device *pdev;
- unsigned long rate;
- unsigned long periodic;
- struct clock_event_device ced;
+
+ struct sh_mtu2_channel *channels;
+ unsigned int num_channels;
+
+ bool legacy;
+ bool has_clockevent;
};
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
@@ -55,6 +66,88 @@ static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */
+#define TCR_CCLR_NONE (0 << 5)
+#define TCR_CCLR_TGRA (1 << 5)
+#define TCR_CCLR_TGRB (2 << 5)
+#define TCR_CCLR_SYNC (3 << 5)
+#define TCR_CCLR_TGRC (5 << 5)
+#define TCR_CCLR_TGRD (6 << 5)
+#define TCR_CCLR_MASK (7 << 5)
+#define TCR_CKEG_RISING (0 << 3)
+#define TCR_CKEG_FALLING (1 << 3)
+#define TCR_CKEG_BOTH (2 << 3)
+#define TCR_CKEG_MASK (3 << 3)
+/* Values 4 to 7 are channel-dependent */
+#define TCR_TPSC_P1 (0 << 0)
+#define TCR_TPSC_P4 (1 << 0)
+#define TCR_TPSC_P16 (2 << 0)
+#define TCR_TPSC_P64 (3 << 0)
+#define TCR_TPSC_CH0_TCLKA (4 << 0)
+#define TCR_TPSC_CH0_TCLKB (5 << 0)
+#define TCR_TPSC_CH0_TCLKC (6 << 0)
+#define TCR_TPSC_CH0_TCLKD (7 << 0)
+#define TCR_TPSC_CH1_TCLKA (4 << 0)
+#define TCR_TPSC_CH1_TCLKB (5 << 0)
+#define TCR_TPSC_CH1_P256 (6 << 0)
+#define TCR_TPSC_CH1_TCNT2 (7 << 0)
+#define TCR_TPSC_CH2_TCLKA (4 << 0)
+#define TCR_TPSC_CH2_TCLKB (5 << 0)
+#define TCR_TPSC_CH2_TCLKC (6 << 0)
+#define TCR_TPSC_CH2_P1024 (7 << 0)
+#define TCR_TPSC_CH34_P256 (4 << 0)
+#define TCR_TPSC_CH34_P1024 (5 << 0)
+#define TCR_TPSC_CH34_TCLKA (6 << 0)
+#define TCR_TPSC_CH34_TCLKB (7 << 0)
+#define TCR_TPSC_MASK (7 << 0)
+
+#define TMDR_BFE (1 << 6)
+#define TMDR_BFB (1 << 5)
+#define TMDR_BFA (1 << 4)
+#define TMDR_MD_NORMAL (0 << 0)
+#define TMDR_MD_PWM_1 (2 << 0)
+#define TMDR_MD_PWM_2 (3 << 0)
+#define TMDR_MD_PHASE_1 (4 << 0)
+#define TMDR_MD_PHASE_2 (5 << 0)
+#define TMDR_MD_PHASE_3 (6 << 0)
+#define TMDR_MD_PHASE_4 (7 << 0)
+#define TMDR_MD_PWM_SYNC (8 << 0)
+#define TMDR_MD_PWM_COMP_CREST (13 << 0)
+#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
+#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
+#define TMDR_MD_MASK (15 << 0)
+
+#define TIOC_IOCH(n) ((n) << 4)
+#define TIOC_IOCL(n) ((n) << 0)
+#define TIOR_OC_RETAIN (0 << 0)
+#define TIOR_OC_0_CLEAR (1 << 0)
+#define TIOR_OC_0_SET (2 << 0)
+#define TIOR_OC_0_TOGGLE (3 << 0)
+#define TIOR_OC_1_CLEAR (5 << 0)
+#define TIOR_OC_1_SET (6 << 0)
+#define TIOR_OC_1_TOGGLE (7 << 0)
+#define TIOR_IC_RISING (8 << 0)
+#define TIOR_IC_FALLING (9 << 0)
+#define TIOR_IC_BOTH (10 << 0)
+#define TIOR_IC_TCNT (12 << 0)
+#define TIOR_MASK (15 << 0)
+
+#define TIER_TTGE (1 << 7)
+#define TIER_TTGE2 (1 << 6)
+#define TIER_TCIEU (1 << 5)
+#define TIER_TCIEV (1 << 4)
+#define TIER_TGIED (1 << 3)
+#define TIER_TGIEC (1 << 2)
+#define TIER_TGIEB (1 << 1)
+#define TIER_TGIEA (1 << 0)
+
+#define TSR_TCFD (1 << 7)
+#define TSR_TCFU (1 << 5)
+#define TSR_TCFV (1 << 4)
+#define TSR_TGFD (1 << 3)
+#define TSR_TGFC (1 << 2)
+#define TSR_TGFB (1 << 1)
+#define TSR_TGFA (1 << 0)
+
static unsigned long mtu2_reg_offs[] = {
[TCR] = 0,
[TMDR] = 1,
@@ -65,135 +158,143 @@ static unsigned long mtu2_reg_offs[] = {
[TGR] = 8,
};
-static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
+static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
unsigned long offs;
- if (reg_nr == TSTR)
- return ioread8(base + cfg->channel_offset);
+ if (reg_nr == TSTR) {
+ if (ch->mtu->legacy)
+ return ioread8(ch->mtu->mapbase);
+ else
+ return ioread8(ch->mtu->mapbase + 0x280);
+ }
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
- return ioread16(base + offs);
+ return ioread16(ch->base + offs);
else
- return ioread8(base + offs);
+ return ioread8(ch->base + offs);
}
-static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
+static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
unsigned long value)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR) {
- iowrite8(value, base + cfg->channel_offset);
- return;
+ if (ch->mtu->legacy)
+ return iowrite8(value, ch->mtu->mapbase);
+ else
+ return iowrite8(value, ch->mtu->mapbase + 0x280);
}
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
- iowrite16(value, base + offs);
+ iowrite16(value, ch->base + offs);
else
- iowrite8(value, base + offs);
+ iowrite8(value, ch->base + offs);
}
-static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
+static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
- value = sh_mtu2_read(p, TSTR);
+ value = sh_mtu2_read(ch, TSTR);
if (start)
- value |= 1 << cfg->timer_bit;
+ value |= 1 << ch->index;
else
- value &= ~(1 << cfg->timer_bit);
+ value &= ~(1 << ch->index);
- sh_mtu2_write(p, TSTR, value);
+ sh_mtu2_write(ch, TSTR, value);
raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}
-static int sh_mtu2_enable(struct sh_mtu2_priv *p)
+static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
+ unsigned long periodic;
+ unsigned long rate;
int ret;
- pm_runtime_get_sync(&p->pdev->dev);
- dev_pm_syscore_device(&p->pdev->dev, true);
+ pm_runtime_get_sync(&ch->mtu->pdev->dev);
+ dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_enable(ch->mtu->clk);
if (ret) {
- dev_err(&p->pdev->dev, "cannot enable clock\n");
+ dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
+ ch->index);
return ret;
}
/* make sure channel is disabled */
- sh_mtu2_start_stop_ch(p, 0);
-
- p->rate = clk_get_rate(p->clk) / 64;
- p->periodic = (p->rate + HZ/2) / HZ;
-
- /* "Periodic Counter Operation" */
- sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
- sh_mtu2_write(p, TIOR, 0);
- sh_mtu2_write(p, TGR, p->periodic);
- sh_mtu2_write(p, TCNT, 0);
- sh_mtu2_write(p, TMDR, 0);
- sh_mtu2_write(p, TIER, 0x01);
+ sh_mtu2_start_stop_ch(ch, 0);
+
+ rate = clk_get_rate(ch->mtu->clk) / 64;
+ periodic = (rate + HZ/2) / HZ;
+
+ /*
+ * "Periodic Counter Operation"
+ * Clear on TGRA compare match, divide clock by 64.
+ */
+ sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
+ sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
+ TIOC_IOCL(TIOR_OC_0_CLEAR));
+ sh_mtu2_write(ch, TGR, periodic);
+ sh_mtu2_write(ch, TCNT, 0);
+ sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
+ sh_mtu2_write(ch, TIER, TIER_TGIEA);
/* enable channel */
- sh_mtu2_start_stop_ch(p, 1);
+ sh_mtu2_start_stop_ch(ch, 1);
return 0;
}
-static void sh_mtu2_disable(struct sh_mtu2_priv *p)
+static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
/* disable channel */
- sh_mtu2_start_stop_ch(p, 0);
+ sh_mtu2_start_stop_ch(ch, 0);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable(ch->mtu->clk);
- dev_pm_syscore_device(&p->pdev->dev, false);
- pm_runtime_put(&p->pdev->dev);
+ dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
+ pm_runtime_put(&ch->mtu->pdev->dev);
}
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
- struct sh_mtu2_priv *p = dev_id;
+ struct sh_mtu2_channel *ch = dev_id;
/* acknowledge interrupt */
- sh_mtu2_read(p, TSR);
- sh_mtu2_write(p, TSR, 0xfe);
+ sh_mtu2_read(ch, TSR);
+ sh_mtu2_write(ch, TSR, ~TSR_TGFA);
/* notify clockevent layer */
- p->ced.event_handler(&p->ced);
+ ch->ced.event_handler(&ch->ced);
return IRQ_HANDLED;
}
-static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
+static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
- return container_of(ced, struct sh_mtu2_priv, ced);
+ return container_of(ced, struct sh_mtu2_channel, ced);
}
static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
- struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
+ struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
int disabled = 0;
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
- sh_mtu2_disable(p);
+ sh_mtu2_disable(ch);
disabled = 1;
break;
default:
@@ -202,12 +303,13 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&p->pdev->dev, "used for periodic clock events\n");
- sh_mtu2_enable(p);
+ dev_info(&ch->mtu->pdev->dev,
+ "ch%u: used for periodic clock events\n", ch->index);
+ sh_mtu2_enable(ch);
break;
case CLOCK_EVT_MODE_UNUSED:
if (!disabled)
- sh_mtu2_disable(p);
+ sh_mtu2_disable(ch);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
default:
@@ -217,125 +319,207 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+ pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+ pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
-static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
- char *name, unsigned long rating)
+static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
+ const char *name)
{
- struct clock_event_device *ced = &p->ced;
+ struct clock_event_device *ced = &ch->ced;
int ret;
- memset(ced, 0, sizeof(*ced));
-
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
- ced->rating = rating;
- ced->cpumask = cpumask_of(0);
+ ced->rating = 200;
+ ced->cpumask = cpu_possible_mask;
ced->set_mode = sh_mtu2_clock_event_mode;
ced->suspend = sh_mtu2_clock_event_suspend;
ced->resume = sh_mtu2_clock_event_resume;
- dev_info(&p->pdev->dev, "used for clock events\n");
+ dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
+ ch->index);
clockevents_register_device(ced);
- ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ ret = request_irq(ch->irq, sh_mtu2_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&ch->mtu->pdev->dev), ch);
if (ret) {
- dev_err(&p->pdev->dev, "failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
+ ch->index, ch->irq);
return;
}
}
-static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
- unsigned long clockevent_rating)
+static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
+ bool clockevent)
{
- if (clockevent_rating)
- sh_mtu2_register_clockevent(p, name, clockevent_rating);
+ if (clockevent) {
+ ch->mtu->has_clockevent = true;
+ sh_mtu2_register_clockevent(ch, name);
+ }
return 0;
}
-static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
+static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
+ struct sh_mtu2_device *mtu)
{
- struct sh_timer_config *cfg = pdev->dev.platform_data;
- struct resource *res;
- int irq, ret;
- ret = -ENXIO;
+ static const unsigned int channel_offsets[] = {
+ 0x300, 0x380, 0x000,
+ };
+ bool clockevent;
+
+ ch->mtu = mtu;
+
+ if (mtu->legacy) {
+ struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+
+ clockevent = cfg->clockevent_rating != 0;
- memset(p, 0, sizeof(*p));
- p->pdev = pdev;
+ ch->irq = platform_get_irq(mtu->pdev, 0);
+ ch->base = mtu->mapbase - cfg->channel_offset;
+ ch->index = cfg->timer_bit;
+ } else {
+ char name[6];
- if (!cfg) {
- dev_err(&p->pdev->dev, "missing platform data\n");
- goto err0;
+ clockevent = true;
+
+ sprintf(name, "tgi%ua", index);
+ ch->irq = platform_get_irq_byname(mtu->pdev, name);
+ ch->base = mtu->mapbase + channel_offsets[index];
+ ch->index = index;
}
- platform_set_drvdata(pdev, p);
+ if (ch->irq < 0) {
+ /* Skip channels with no declared interrupt. */
+ if (!mtu->legacy)
+ return 0;
+
+ dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
+ ch->index);
+ return ch->irq;
+ }
+
+ return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
+}
- res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
+{
+ struct resource *res;
+
+ res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&p->pdev->dev, "failed to get I/O memory\n");
- goto err0;
+ dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
+ return -ENXIO;
}
- irq = platform_get_irq(p->pdev, 0);
- if (irq < 0) {
- dev_err(&p->pdev->dev, "failed to get irq\n");
- goto err0;
+ mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (mtu->mapbase == NULL)
+ return -ENXIO;
+
+ /*
+ * In legacy platform device configuration (with one device per channel)
+ * the resource points to the channel base address.
+ */
+ if (mtu->legacy) {
+ struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+ mtu->mapbase += cfg->channel_offset;
}
- /* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
- if (p->mapbase == NULL) {
- dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
- goto err0;
+ return 0;
+}
+
+static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
+{
+ if (mtu->legacy) {
+ struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+ mtu->mapbase -= cfg->channel_offset;
}
- /* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = dev_name(&p->pdev->dev);
- p->irqaction.handler = sh_mtu2_interrupt;
- p->irqaction.dev_id = p;
- p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
-
- /* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
+ iounmap(mtu->mapbase);
+}
+
+static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
+ struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ const struct platform_device_id *id = pdev->id_entry;
+ unsigned int i;
+ int ret;
+
+ mtu->pdev = pdev;
+ mtu->legacy = id->driver_data;
+
+ if (mtu->legacy && !cfg) {
+ dev_err(&mtu->pdev->dev, "missing platform data\n");
+ return -ENXIO;
}
- ret = clk_prepare(p->clk);
- if (ret < 0)
- goto err2;
+ /* Get hold of clock. */
+ mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
+ if (IS_ERR(mtu->clk)) {
+ dev_err(&mtu->pdev->dev, "cannot get clock\n");
+ return PTR_ERR(mtu->clk);
+ }
- ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating);
+ ret = clk_prepare(mtu->clk);
if (ret < 0)
- goto err3;
+ goto err_clk_put;
+
+ /* Map the memory resource. */
+ ret = sh_mtu2_map_memory(mtu);
+ if (ret < 0) {
+ dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
+ goto err_clk_unprepare;
+ }
+
+ /* Allocate and setup the channels. */
+ if (mtu->legacy)
+ mtu->num_channels = 1;
+ else
+ mtu->num_channels = 3;
+
+ mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
+ GFP_KERNEL);
+ if (mtu->channels == NULL) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ if (mtu->legacy) {
+ ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
+ if (ret < 0)
+ goto err_unmap;
+ } else {
+ for (i = 0; i < mtu->num_channels; ++i) {
+ ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
+ if (ret < 0)
+ goto err_unmap;
+ }
+ }
+
+ platform_set_drvdata(pdev, mtu);
return 0;
- err3:
- clk_unprepare(p->clk);
- err2:
- clk_put(p->clk);
- err1:
- iounmap(p->mapbase);
- err0:
+
+err_unmap:
+ kfree(mtu->channels);
+ sh_mtu2_unmap_memory(mtu);
+err_clk_unprepare:
+ clk_unprepare(mtu->clk);
+err_clk_put:
+ clk_put(mtu->clk);
return ret;
}
static int sh_mtu2_probe(struct platform_device *pdev)
{
- struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev)) {
@@ -343,20 +527,20 @@ static int sh_mtu2_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
}
- if (p) {
+ if (mtu) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
+ mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
+ if (mtu == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
- ret = sh_mtu2_setup(p, pdev);
+ ret = sh_mtu2_setup(mtu, pdev);
if (ret) {
- kfree(p);
+ kfree(mtu);
pm_runtime_idle(&pdev->dev);
return ret;
}
@@ -364,7 +548,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
return 0;
out:
- if (cfg->clockevent_rating)
+ if (mtu->has_clockevent)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
@@ -377,12 +561,20 @@ static int sh_mtu2_remove(struct platform_device *pdev)
return -EBUSY; /* cannot unregister clockevent */
}
+static const struct platform_device_id sh_mtu2_id_table[] = {
+ { "sh_mtu2", 1 },
+ { "sh-mtu2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
+
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
.remove = sh_mtu2_remove,
.driver = {
.name = "sh_mtu2",
- }
+ },
+ .id_table = sh_mtu2_id_table,
};
static int __init sh_mtu2_init(void)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index ecd7b60bfdfa..4ba2c0fea580 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -11,35 +11,41 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+enum sh_tmu_model {
+ SH_TMU_LEGACY,
+ SH_TMU,
+ SH_TMU_SH3,
+};
+
+struct sh_tmu_device;
+
+struct sh_tmu_channel {
+ struct sh_tmu_device *tmu;
+ unsigned int index;
+
+ void __iomem *base;
+ int irq;
-struct sh_tmu_priv {
- void __iomem *mapbase;
- struct clk *clk;
- struct irqaction irqaction;
- struct platform_device *pdev;
unsigned long rate;
unsigned long periodic;
struct clock_event_device ced;
@@ -48,6 +54,21 @@ struct sh_tmu_priv {
unsigned int enable_count;
};
+struct sh_tmu_device {
+ struct platform_device *pdev;
+
+ void __iomem *mapbase;
+ struct clk *clk;
+
+ enum sh_tmu_model model;
+
+ struct sh_tmu_channel *channels;
+ unsigned int num_channels;
+
+ bool has_clockevent;
+ bool has_clocksource;
+};
+
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
@@ -55,189 +76,208 @@ static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
-static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
+#define TCR_UNF (1 << 8)
+#define TCR_UNIE (1 << 5)
+#define TCR_TPSC_CLK4 (0 << 0)
+#define TCR_TPSC_CLK16 (1 << 0)
+#define TCR_TPSC_CLK64 (2 << 0)
+#define TCR_TPSC_CLK256 (3 << 0)
+#define TCR_TPSC_CLK1024 (4 << 0)
+#define TCR_TPSC_MASK (7 << 0)
+
+static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
unsigned long offs;
- if (reg_nr == TSTR)
- return ioread8(base - cfg->channel_offset);
+ if (reg_nr == TSTR) {
+ switch (ch->tmu->model) {
+ case SH_TMU_LEGACY:
+ return ioread8(ch->tmu->mapbase);
+ case SH_TMU_SH3:
+ return ioread8(ch->tmu->mapbase + 2);
+ case SH_TMU:
+ return ioread8(ch->tmu->mapbase + 4);
+ }
+ }
offs = reg_nr << 2;
if (reg_nr == TCR)
- return ioread16(base + offs);
+ return ioread16(ch->base + offs);
else
- return ioread32(base + offs);
+ return ioread32(ch->base + offs);
}
-static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
+static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
unsigned long value)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR) {
- iowrite8(value, base - cfg->channel_offset);
- return;
+ switch (ch->tmu->model) {
+ case SH_TMU_LEGACY:
+ return iowrite8(value, ch->tmu->mapbase);
+ case SH_TMU_SH3:
+ return iowrite8(value, ch->tmu->mapbase + 2);
+ case SH_TMU:
+ return iowrite8(value, ch->tmu->mapbase + 4);
+ }
}
offs = reg_nr << 2;
if (reg_nr == TCR)
- iowrite16(value, base + offs);
+ iowrite16(value, ch->base + offs);
else
- iowrite32(value, base + offs);
+ iowrite32(value, ch->base + offs);
}
-static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
+static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&sh_tmu_lock, flags);
- value = sh_tmu_read(p, TSTR);
+ value = sh_tmu_read(ch, TSTR);
if (start)
- value |= 1 << cfg->timer_bit;
+ value |= 1 << ch->index;
else
- value &= ~(1 << cfg->timer_bit);
+ value &= ~(1 << ch->index);
- sh_tmu_write(p, TSTR, value);
+ sh_tmu_write(ch, TSTR, value);
raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
-static int __sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
int ret;
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_enable(ch->tmu->clk);
if (ret) {
- dev_err(&p->pdev->dev, "cannot enable clock\n");
+ dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
+ ch->index);
return ret;
}
/* make sure channel is disabled */
- sh_tmu_start_stop_ch(p, 0);
+ sh_tmu_start_stop_ch(ch, 0);
/* maximum timeout */
- sh_tmu_write(p, TCOR, 0xffffffff);
- sh_tmu_write(p, TCNT, 0xffffffff);
+ sh_tmu_write(ch, TCOR, 0xffffffff);
+ sh_tmu_write(ch, TCNT, 0xffffffff);
/* configure channel to parent clock / 4, irq off */
- p->rate = clk_get_rate(p->clk) / 4;
- sh_tmu_write(p, TCR, 0x0000);
+ ch->rate = clk_get_rate(ch->tmu->clk) / 4;
+ sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* enable channel */
- sh_tmu_start_stop_ch(p, 1);
+ sh_tmu_start_stop_ch(ch, 1);
return 0;
}
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
- if (p->enable_count++ > 0)
+ if (ch->enable_count++ > 0)
return 0;
- pm_runtime_get_sync(&p->pdev->dev);
- dev_pm_syscore_device(&p->pdev->dev, true);
+ pm_runtime_get_sync(&ch->tmu->pdev->dev);
+ dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
- return __sh_tmu_enable(p);
+ return __sh_tmu_enable(ch);
}
-static void __sh_tmu_disable(struct sh_tmu_priv *p)
+static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
/* disable channel */
- sh_tmu_start_stop_ch(p, 0);
+ sh_tmu_start_stop_ch(ch, 0);
/* disable interrupts in TMU block */
- sh_tmu_write(p, TCR, 0x0000);
+ sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable(ch->tmu->clk);
}
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
- if (WARN_ON(p->enable_count == 0))
+ if (WARN_ON(ch->enable_count == 0))
return;
- if (--p->enable_count > 0)
+ if (--ch->enable_count > 0)
return;
- __sh_tmu_disable(p);
+ __sh_tmu_disable(ch);
- dev_pm_syscore_device(&p->pdev->dev, false);
- pm_runtime_put(&p->pdev->dev);
+ dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
+ pm_runtime_put(&ch->tmu->pdev->dev);
}
-static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
+static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
int periodic)
{
/* stop timer */
- sh_tmu_start_stop_ch(p, 0);
+ sh_tmu_start_stop_ch(ch, 0);
/* acknowledge interrupt */
- sh_tmu_read(p, TCR);
+ sh_tmu_read(ch, TCR);
/* enable interrupt */
- sh_tmu_write(p, TCR, 0x0020);
+ sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* reload delta value in case of periodic timer */
if (periodic)
- sh_tmu_write(p, TCOR, delta);
+ sh_tmu_write(ch, TCOR, delta);
else
- sh_tmu_write(p, TCOR, 0xffffffff);
+ sh_tmu_write(ch, TCOR, 0xffffffff);
- sh_tmu_write(p, TCNT, delta);
+ sh_tmu_write(ch, TCNT, delta);
/* start timer */
- sh_tmu_start_stop_ch(p, 1);
+ sh_tmu_start_stop_ch(ch, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
- struct sh_tmu_priv *p = dev_id;
+ struct sh_tmu_channel *ch = dev_id;
/* disable or acknowledge interrupt */
- if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
- sh_tmu_write(p, TCR, 0x0000);
+ if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+ sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
else
- sh_tmu_write(p, TCR, 0x0020);
+ sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* notify clockevent layer */
- p->ced.event_handler(&p->ced);
+ ch->ced.event_handler(&ch->ced);
return IRQ_HANDLED;
}
-static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
+static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
- return container_of(cs, struct sh_tmu_priv, cs);
+ return container_of(cs, struct sh_tmu_channel, cs);
}
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
- struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
- return sh_tmu_read(p, TCNT) ^ 0xffffffff;
+ return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
- struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
int ret;
- if (WARN_ON(p->cs_enabled))
+ if (WARN_ON(ch->cs_enabled))
return 0;
- ret = sh_tmu_enable(p);
+ ret = sh_tmu_enable(ch);
if (!ret) {
- __clocksource_updatefreq_hz(cs, p->rate);
- p->cs_enabled = true;
+ __clocksource_updatefreq_hz(cs, ch->rate);
+ ch->cs_enabled = true;
}
return ret;
@@ -245,48 +285,48 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
- struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
- if (WARN_ON(!p->cs_enabled))
+ if (WARN_ON(!ch->cs_enabled))
return;
- sh_tmu_disable(p);
- p->cs_enabled = false;
+ sh_tmu_disable(ch);
+ ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
- struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
- if (!p->cs_enabled)
+ if (!ch->cs_enabled)
return;
- if (--p->enable_count == 0) {
- __sh_tmu_disable(p);
- pm_genpd_syscore_poweroff(&p->pdev->dev);
+ if (--ch->enable_count == 0) {
+ __sh_tmu_disable(ch);
+ pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
- struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
- if (!p->cs_enabled)
+ if (!ch->cs_enabled)
return;
- if (p->enable_count++ == 0) {
- pm_genpd_syscore_poweron(&p->pdev->dev);
- __sh_tmu_enable(p);
+ if (ch->enable_count++ == 0) {
+ pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
+ __sh_tmu_enable(ch);
}
}
-static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
- char *name, unsigned long rating)
+static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
+ const char *name)
{
- struct clocksource *cs = &p->cs;
+ struct clocksource *cs = &ch->cs;
cs->name = name;
- cs->rating = rating;
+ cs->rating = 200;
cs->read = sh_tmu_clocksource_read;
cs->enable = sh_tmu_clocksource_enable;
cs->disable = sh_tmu_clocksource_disable;
@@ -295,43 +335,44 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- dev_info(&p->pdev->dev, "used as clock source\n");
+ dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
+ ch->index);
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
-static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
+static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
- return container_of(ced, struct sh_tmu_priv, ced);
+ return container_of(ced, struct sh_tmu_channel, ced);
}
-static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
+static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
- struct clock_event_device *ced = &p->ced;
+ struct clock_event_device *ced = &ch->ced;
- sh_tmu_enable(p);
+ sh_tmu_enable(ch);
- clockevents_config(ced, p->rate);
+ clockevents_config(ced, ch->rate);
if (periodic) {
- p->periodic = (p->rate + HZ/2) / HZ;
- sh_tmu_set_next(p, p->periodic, 1);
+ ch->periodic = (ch->rate + HZ/2) / HZ;
+ sh_tmu_set_next(ch, ch->periodic, 1);
}
}
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
- struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+ struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
int disabled = 0;
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
- sh_tmu_disable(p);
+ sh_tmu_disable(ch);
disabled = 1;
break;
default:
@@ -340,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&p->pdev->dev, "used for periodic clock events\n");
- sh_tmu_clock_event_start(p, 1);
+ dev_info(&ch->tmu->pdev->dev,
+ "ch%u: used for periodic clock events\n", ch->index);
+ sh_tmu_clock_event_start(ch, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&p->pdev->dev, "used for oneshot clock events\n");
- sh_tmu_clock_event_start(p, 0);
+ dev_info(&ch->tmu->pdev->dev,
+ "ch%u: used for oneshot clock events\n", ch->index);
+ sh_tmu_clock_event_start(ch, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
if (!disabled)
- sh_tmu_disable(p);
+ sh_tmu_disable(ch);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
default:
@@ -360,147 +403,234 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
static int sh_tmu_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
- struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+ struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
/* program new delta value */
- sh_tmu_set_next(p, delta, 0);
+ sh_tmu_set_next(ch, delta, 0);
return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+ pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+ pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
-static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
- char *name, unsigned long rating)
+static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
+ const char *name)
{
- struct clock_event_device *ced = &p->ced;
+ struct clock_event_device *ced = &ch->ced;
int ret;
- memset(ced, 0, sizeof(*ced));
-
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
- ced->rating = rating;
+ ced->rating = 200;
ced->cpumask = cpumask_of(0);
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
ced->resume = sh_tmu_clock_event_resume;
- dev_info(&p->pdev->dev, "used for clock events\n");
+ dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
+ ch->index);
clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
- ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ ret = request_irq(ch->irq, sh_tmu_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&ch->tmu->pdev->dev), ch);
if (ret) {
- dev_err(&p->pdev->dev, "failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
+ ch->index, ch->irq);
return;
}
}
-static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
- unsigned long clockevent_rating,
- unsigned long clocksource_rating)
+static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
+ bool clockevent, bool clocksource)
{
- if (clockevent_rating)
- sh_tmu_register_clockevent(p, name, clockevent_rating);
- else if (clocksource_rating)
- sh_tmu_register_clocksource(p, name, clocksource_rating);
+ if (clockevent) {
+ ch->tmu->has_clockevent = true;
+ sh_tmu_register_clockevent(ch, name);
+ } else if (clocksource) {
+ ch->tmu->has_clocksource = true;
+ sh_tmu_register_clocksource(ch, name);
+ }
return 0;
}
-static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
+static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
+ bool clockevent, bool clocksource,
+ struct sh_tmu_device *tmu)
{
- struct sh_timer_config *cfg = pdev->dev.platform_data;
- struct resource *res;
- int irq, ret;
- ret = -ENXIO;
+ /* Skip unused channels. */
+ if (!clockevent && !clocksource)
+ return 0;
- memset(p, 0, sizeof(*p));
- p->pdev = pdev;
+ ch->tmu = tmu;
+
+ if (tmu->model == SH_TMU_LEGACY) {
+ struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+
+ /*
+ * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
+ * channel registers blocks at base + 2 + 12 * index, while all
+ * other variants map them at base + 4 + 12 * index. We can
+ * compute the index by just dividing by 12, the 2 bytes or 4
+ * bytes offset being hidden by the integer division.
+ */
+ ch->index = cfg->channel_offset / 12;
+ ch->base = tmu->mapbase + cfg->channel_offset;
+ } else {
+ ch->index = index;
+
+ if (tmu->model == SH_TMU_SH3)
+ ch->base = tmu->mapbase + 4 + ch->index * 12;
+ else
+ ch->base = tmu->mapbase + 8 + ch->index * 12;
+ }
- if (!cfg) {
- dev_err(&p->pdev->dev, "missing platform data\n");
- goto err0;
+ ch->irq = platform_get_irq(tmu->pdev, ch->index);
+ if (ch->irq < 0) {
+ dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
+ ch->index);
+ return ch->irq;
}
- platform_set_drvdata(pdev, p);
+ ch->cs_enabled = false;
+ ch->enable_count = 0;
+
+ return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
+ clockevent, clocksource);
+}
- res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
+{
+ struct resource *res;
+
+ res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&p->pdev->dev, "failed to get I/O memory\n");
- goto err0;
+ dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
+ return -ENXIO;
+ }
+
+ tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (tmu->mapbase == NULL)
+ return -ENXIO;
+
+ /*
+ * In legacy platform device configuration (with one device per channel)
+ * the resource points to the channel base address.
+ */
+ if (tmu->model == SH_TMU_LEGACY) {
+ struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+ tmu->mapbase -= cfg->channel_offset;
}
- irq = platform_get_irq(p->pdev, 0);
- if (irq < 0) {
- dev_err(&p->pdev->dev, "failed to get irq\n");
- goto err0;
+ return 0;
+}
+
+static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
+{
+ if (tmu->model == SH_TMU_LEGACY) {
+ struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+ tmu->mapbase += cfg->channel_offset;
}
- /* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
- if (p->mapbase == NULL) {
- dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
- goto err0;
+ iounmap(tmu->mapbase);
+}
+
+static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ const struct platform_device_id *id = pdev->id_entry;
+ unsigned int i;
+ int ret;
+
+ if (!cfg) {
+ dev_err(&tmu->pdev->dev, "missing platform data\n");
+ return -ENXIO;
}
- /* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = dev_name(&p->pdev->dev);
- p->irqaction.handler = sh_tmu_interrupt;
- p->irqaction.dev_id = p;
- p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
-
- /* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, "tmu_fck");
- if (IS_ERR(p->clk)) {
- dev_err(&p->pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
+ tmu->pdev = pdev;
+ tmu->model = id->driver_data;
+
+ /* Get hold of clock. */
+ tmu->clk = clk_get(&tmu->pdev->dev,
+ tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
+ if (IS_ERR(tmu->clk)) {
+ dev_err(&tmu->pdev->dev, "cannot get clock\n");
+ return PTR_ERR(tmu->clk);
}
- ret = clk_prepare(p->clk);
+ ret = clk_prepare(tmu->clk);
if (ret < 0)
- goto err2;
+ goto err_clk_put;
- p->cs_enabled = false;
- p->enable_count = 0;
+ /* Map the memory resource. */
+ ret = sh_tmu_map_memory(tmu);
+ if (ret < 0) {
+ dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
+ goto err_clk_unprepare;
+ }
- ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating,
- cfg->clocksource_rating);
- if (ret < 0)
- goto err3;
+ /* Allocate and setup the channels. */
+ if (tmu->model == SH_TMU_LEGACY)
+ tmu->num_channels = 1;
+ else
+ tmu->num_channels = hweight8(cfg->channels_mask);
+
+ tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
+ GFP_KERNEL);
+ if (tmu->channels == NULL) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ if (tmu->model == SH_TMU_LEGACY) {
+ ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
+ cfg->clockevent_rating != 0,
+ cfg->clocksource_rating != 0, tmu);
+ if (ret < 0)
+ goto err_unmap;
+ } else {
+ /*
+ * Use the first channel as a clock event device and the second
+ * channel as a clock source.
+ */
+ for (i = 0; i < tmu->num_channels; ++i) {
+ ret = sh_tmu_channel_setup(&tmu->channels[i], i,
+ i == 0, i == 1, tmu);
+ if (ret < 0)
+ goto err_unmap;
+ }
+ }
+
+ platform_set_drvdata(pdev, tmu);
return 0;
- err3:
- clk_unprepare(p->clk);
- err2:
- clk_put(p->clk);
- err1:
- iounmap(p->mapbase);
- err0:
+err_unmap:
+ kfree(tmu->channels);
+ sh_tmu_unmap_memory(tmu);
+err_clk_unprepare:
+ clk_unprepare(tmu->clk);
+err_clk_put:
+ clk_put(tmu->clk);
return ret;
}
static int sh_tmu_probe(struct platform_device *pdev)
{
- struct sh_tmu_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev)) {
@@ -508,20 +638,20 @@ static int sh_tmu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
}
- if (p) {
+ if (tmu) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
+ tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
+ if (tmu == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
- ret = sh_tmu_setup(p, pdev);
+ ret = sh_tmu_setup(tmu, pdev);
if (ret) {
- kfree(p);
+ kfree(tmu);
pm_runtime_idle(&pdev->dev);
return ret;
}
@@ -529,7 +659,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
return 0;
out:
- if (cfg->clockevent_rating || cfg->clocksource_rating)
+ if (tmu->has_clockevent || tmu->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
@@ -542,12 +672,21 @@ static int sh_tmu_remove(struct platform_device *pdev)
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
+static const struct platform_device_id sh_tmu_id_table[] = {
+ { "sh_tmu", SH_TMU_LEGACY },
+ { "sh-tmu", SH_TMU },
+ { "sh-tmu-sh3", SH_TMU_SH3 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
+
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
.remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
- }
+ },
+ .id_table = sh_tmu_id_table,
};
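
The id_table ties back to the id->driver_data read at the top of sh_tmu_setup(): the platform core matches pdev->name against the table and hands back the matching entry, so each registered device name selects a model enum. A hedged sketch of how a probe routine retrieves that value (hypothetical names):

#include <linux/platform_device.h>

enum demo_model { DEMO_LEGACY, DEMO_CURRENT };

static int demo_model_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	enum demo_model model = (enum demo_model)id->driver_data;

	/* driver_data carries the per-name model enum. */
	dev_info(&pdev->dev, "matched model %d\n", model);
	return 0;
}
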
static int __init sh_tmu_init(void)
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable_unprepare(tcd->clk);
+ clk_disable(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_prepare_enable(tcd->clk);
+ clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_prepare_enable(tcd->clk);
+ clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
ret = clk_prepare_enable(t2_clk);
if (ret)
return ret;
- clk_disable_unprepare(t2_clk);
+ clk_disable(t2_clk);
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
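
The clk_prepare_enable() → clk_enable() change in tc_mode() is about context: clk_prepare() may sleep, while the clockevent set-mode callback runs atomically. The hunk above therefore prepares the clock once in task context (prepare_enable followed by disable leaves it prepared) and restricts the atomic path to enable/disable. A reduced sketch of the split, for a hypothetical driver:

#include <linux/clk.h>

static struct clk *demo_clk;

static int demo_setup(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep: only legal here */
	if (ret)
		return ret;

	demo_clk = clk;
	return 0;
}

static void demo_set_mode_atomic(bool on)
{
	if (on)
		clk_enable(demo_clk);	/* atomic-safe */
	else
		clk_disable(demo_clk);
}
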
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..dbd30398222a 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
action->dev_id = ce;
BUG_ON(setup_irq(ce->irq, action));
- irq_set_affinity(action->irq, cpumask_of(cpu));
+ irq_force_affinity(action->irq, cpumask_of(cpu));
clockevents_register_device(ce);
return 0;
@@ -252,15 +252,13 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_marco_timer_init(void)
+static void __init sirfsoc_marco_timer_init(struct device_node *np)
{
unsigned long rate;
u32 timer_div;
struct clk *clk;
- /* timer's input clock is io clock */
- clk = clk_get_sys("io", NULL);
-
+ clk = of_clk_get(np, 0);
BUG_ON(IS_ERR(clk));
rate = clk_get_rate(clk);
@@ -303,6 +301,6 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
if (!sirfsoc_timer1_irq.irq)
panic("No irq passed for timer1 via DT\n");
- sirfsoc_marco_timer_init();
+ sirfsoc_marco_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init);
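
The irq_set_affinity() → irq_force_affinity() switch matters because sirfsoc_local_timer_setup() runs from the CPU-hotplug path before the target CPU is flagged online, and the plain setter rejects CPUs outside cpu_online_mask. The forced variant applies the mask regardless, as the sketch below illustrates (hypothetical wrapper):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Bind a per-cpu timer IRQ to a CPU that may not be online yet. */
static int demo_bind_timer_irq(unsigned int irq, unsigned int cpu)
{
	return irq_force_affinity(irq, cpumask_of(cpu));
}
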
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 1a6b2d6356d6..a722aac7ac02 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -61,7 +61,8 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
- WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & BIT(0)));
+ WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) &
+ BIT(0)));
/* clear timer0 interrupt */
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
@@ -77,9 +78,11 @@ static cycle_t sirfsoc_timer_read(struct clocksource *cs)
u64 cycles;
/* latch the 64-bit timer counter */
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
+ sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI);
- cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
+ cycles = (cycles << 32) |
+ readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
return cycles;
}
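
For readers new to this hardware: the latch write snapshots both 32-bit halves of the counter in one hardware operation, so the following HI/LO reads cannot tear across a LO rollover. A minimal sketch of the latch-then-read pattern, against hypothetical register offsets:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register layout, for illustration only. */
#define DEMO_LATCH	0x00
#define DEMO_LATCH_BIT	BIT(0)
#define DEMO_HI		0x04
#define DEMO_LO		0x08

static u64 demo_read_counter64(void __iomem *base)
{
	u64 hi, lo;

	/* Latch both halves atomically in hardware. */
	writel_relaxed(DEMO_LATCH_BIT, base + DEMO_LATCH);

	hi = readl_relaxed(base + DEMO_HI);
	lo = readl_relaxed(base + DEMO_LO);

	return (hi << 32) | lo;
}
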
@@ -89,11 +92,13 @@ static int sirfsoc_timer_set_next_event(unsigned long delta,
{
unsigned long now, next;
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
+ sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
next = now + delta;
writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0);
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
+ sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
return next - now > delta ? -ETIME : 0;
@@ -108,10 +113,12 @@ static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
WARN_ON(1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ writel_relaxed(val | BIT(0),
+ sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
- writel_relaxed(val & ~BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ writel_relaxed(val & ~BIT(0),
+ sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_RESUME:
@@ -123,10 +130,13 @@ static void sirfsoc_clocksource_suspend(struct clocksource *cs)
{
int i;
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
+ sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
- sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+ sirfsoc_timer_reg_val[i] =
+ readl_relaxed(sirfsoc_timer_base +
+ sirfsoc_timer_reg_list[i]);
}
static void sirfsoc_clocksource_resume(struct clocksource *cs)
@@ -134,10 +144,13 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
int i;
for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
- writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+ writel_relaxed(sirfsoc_timer_reg_val[i],
+ sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
+ sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
+ sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
}
static struct clock_event_device sirfsoc_clockevent = {
@@ -185,11 +198,8 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
unsigned long rate;
struct clk *clk;
- /* timer's input clock is io clock */
- clk = clk_get_sys("io", NULL);
-
+ clk = of_clk_get(np, 0);
BUG_ON(IS_ERR(clk));
-
rate = clk_get_rate(clk);
BUG_ON(rate < PRIMA2_CLOCK_FREQ);
@@ -202,7 +212,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
- sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
+ sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
@@ -216,4 +226,5 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
sirfsoc_clockevent_init();
}
-CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, "sirf,prima2-tick", sirfsoc_prima2_timer_init);
+CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
+ "sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
new file mode 100644
index 000000000000..e4c50ad2f9d9
--- /dev/null
+++ b/drivers/clocksource/versatile.c
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/clocksource.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+
+#define SYS_24MHZ 0x05c
+
+static void __iomem *versatile_sys_24mhz;
+
+static u32 notrace versatile_sys_24mhz_read(void)
+{
+ return readl(versatile_sys_24mhz);
+}
+
+static void __init versatile_sched_clock_init(struct device_node *node)
+{
+ void __iomem *base = of_iomap(node, 0);
+
+ if (!base)
+ return;
+
+ versatile_sys_24mhz = base + SYS_24MHZ;
+
+ setup_sched_clock(versatile_sys_24mhz_read, 32, 24000000);
+}
+CLOCKSOURCE_OF_DECLARE(versatile, "arm,vexpress-sysreg",
+ versatile_sched_clock_init);
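
A quick sanity check on the parameters: setup_sched_clock(read, 32, 24000000) registers a 32-bit counter at 24 MHz, which wraps every 2^32 / 24,000,000 ≈ 179 seconds; the sched_clock core extends this to 64 bits provided the counter is sampled at least once per wrap period.
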
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b14f1d36f897..f612d68629dc 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -43,6 +43,8 @@ static struct cn_dev cdev;
static int cn_already_initialized;
/*
+ * Sends mult (multiple) cn_msg messages at a time.
+ *
* msg->seq and msg->ack are used to determine message genealogy.
* When someone sends message it puts there locally unique sequence
* and random acknowledge numbers. Sequence number may be copied into
@@ -62,10 +64,13 @@ static int cn_already_initialized;
* the acknowledgement number in the original message + 1, then it is
* a new message.
*
+ * If msg->len != len, then additional cn_msg messages are expected following
+ * the first msg.
+ *
* The message is sent to the portid if given, to the group if given, to
* both if both are given; if both are zero, the group is looked up and
* the message is sent there.
*/
-int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
gfp_t gfp_mask)
{
struct cn_callback_entry *__cbq;
@@ -98,7 +103,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
if (!portid && !netlink_has_listeners(dev->nls, group))
return -ESRCH;
- size = sizeof(*msg) + msg->len;
+ size = sizeof(*msg) + len;
skb = nlmsg_new(size, gfp_mask);
if (!skb)
@@ -121,6 +126,14 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
gfp_mask);
return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
}
+EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
+
+/* Same as cn_netlink_send_mult() except that msg->len is used for len. */
+int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
+ gfp_t gfp_mask)
+{
+ return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
+}
EXPORT_SYMBOL_GPL(cn_netlink_send);
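
A hedged usage sketch of the new entry point: the caller packs several struct cn_msg records back to back and passes the payload length beyond the first header, matching the size = sizeof(*msg) + len allocation above. All names below are illustrative, and the total must fit the u16 len parameter:

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_send_two(struct cn_msg *a, struct cn_msg *b, u32 group)
{
	size_t one = sizeof(*a) + a->len;
	size_t two = sizeof(*b) + b->len;
	struct cn_msg *buf;
	int ret;

	buf = kmalloc(one + two, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, a, one);
	memcpy((u8 *)buf + one, b, two);

	/* len counts everything after the first cn_msg header. */
	ret = cn_netlink_send_mult(buf, one + two - sizeof(*buf), 0,
				   group, GFP_KERNEL);
	kfree(buf);
	return ret;
}
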
/*
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 580503513f0f..d2c7b4b8ffd5 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
- depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM
+ depends on CPU_EXYNOS4210
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ
config ARM_EXYNOS4X12_CPUFREQ
bool "SAMSUNG EXYNOS4x12"
- depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
+ depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ
config ARM_EXYNOS5250_CPUFREQ
bool "SAMSUNG EXYNOS5250"
- depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM
+ depends on SOC_EXYNOS5250
default y
select ARM_EXYNOS_CPUFREQ
help
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+ cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
PTR_ERR(cpu_reg));
}
- cpu_clk = devm_clk_get(cpu_dev, NULL);
+ cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
- goto out_put_node;
+ goto out_put_reg;
}
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
- goto out_put_node;
+ goto out_put_clk;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_put_node;
+ goto out_put_clk;
}
of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
+out_put_reg:
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
out_put_node:
of_node_put(np);
return ret;
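
Dropping the devm_ variants means the probe error path now owns the releases, in reverse acquisition order, which is what the new out_put_clk/out_put_reg labels implement. A condensed sketch of the same ladder for a hypothetical driver (demo_init_opp() stands in for the later setup steps):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int demo_init_opp(struct device *dev)
{
	return 0;	/* stand-in for of_init_opp_table() and friends */
}

static int demo_cpu0_probe(struct device *dev)
{
	struct regulator *reg;
	struct clk *clk;
	int ret;

	reg = regulator_get_optional(dev, "cpu0");
	if (IS_ERR(reg) && PTR_ERR(reg) != -ENODEV)
		return PTR_ERR(reg);	/* a real supply failed */

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_put_reg;
	}

	ret = demo_init_opp(dev);
	if (ret)
		goto out_put_clk;

	return 0;

out_put_clk:
	clk_put(clk);
out_put_reg:
	if (!IS_ERR(reg))	/* may legitimately be ERR_PTR(-ENODEV) */
		regulator_put(reg);
	return ret;
}
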
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&dbs_data->mutex);
+ if (!cpu_cdbs->cur_policy) {
+ mutex_unlock(&dbs_data->mutex);
+ break;
+ }
mutex_lock(&cpu_cdbs->timer_mutex);
if (policy->max < cpu_cdbs->cur_policy->cur)
__cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->min, CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu);
mutex_unlock(&cpu_cdbs->timer_mutex);
+ mutex_unlock(&dbs_data->mutex);
break;
}
return 0;
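
The added dbs_data->mutex pair closes a window where a GOV_LIMITS event can arrive after GOV_STOP has already cleared cur_policy, leaving the dereferences above pointing at torn-down policy state. The guard pattern, reduced to its essentials with hypothetical names:

#include <linux/mutex.h>

struct demo_gov {
	struct mutex lock;
	void *cur_policy;	/* cleared under lock by the STOP path */
};

static int demo_limits_event(struct demo_gov *g)
{
	mutex_lock(&g->lock);
	if (!g->cur_policy) {	/* raced with GOV_STOP: nothing to adjust */
		mutex_unlock(&g->lock);
		return 0;
	}
	/* ... cur_policy is safe to dereference here ... */
	mutex_unlock(&g->lock);
	return 0;
}
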
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f99cfe24e7bc..348c8bafe436 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -17,8 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/platform_device.h>
-
-#include <plat/cpu.h>
+#include <linux/of.h>
#include "exynos-cpufreq.h"
@@ -163,14 +162,22 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (!exynos_info)
return -ENOMEM;
- if (soc_is_exynos4210())
+ if (of_machine_is_compatible("samsung,exynos4210")) {
+ exynos_info->type = EXYNOS_SOC_4210;
ret = exynos4210_cpufreq_init(exynos_info);
- else if (soc_is_exynos4212() || soc_is_exynos4412())
+ } else if (of_machine_is_compatible("samsung,exynos4212")) {
+ exynos_info->type = EXYNOS_SOC_4212;
+ ret = exynos4x12_cpufreq_init(exynos_info);
+ } else if (of_machine_is_compatible("samsung,exynos4412")) {
+ exynos_info->type = EXYNOS_SOC_4412;
ret = exynos4x12_cpufreq_init(exynos_info);
- else if (soc_is_exynos5250())
+ } else if (of_machine_is_compatible("samsung,exynos5250")) {
+ exynos_info->type = EXYNOS_SOC_5250;
ret = exynos5250_cpufreq_init(exynos_info);
- else
- return 0;
+ } else {
+ pr_err("%s: Unknown SoC type\n", __func__);
+ return -ENODEV;
+ }
if (ret)
goto err_vdd_arm;
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index 3ddade8a5125..51af42e1b7fe 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -17,6 +17,13 @@ enum cpufreq_level_index {
L20,
};
+enum exynos_soc_type {
+ EXYNOS_SOC_4210,
+ EXYNOS_SOC_4212,
+ EXYNOS_SOC_4412,
+ EXYNOS_SOC_5250,
+};
+
#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
{ \
.freq = (f) * 1000, \
@@ -34,6 +41,7 @@ struct apll_freq {
};
struct exynos_dvfs_info {
+ enum exynos_soc_type type;
unsigned long mpll_freq_khz;
unsigned int pll_safe_idx;
struct clk *cpu_clk;
@@ -41,6 +49,7 @@ struct exynos_dvfs_info {
struct cpufreq_frequency_table *freq_table;
void (*set_freq)(unsigned int, unsigned int);
bool (*need_apll_change)(unsigned int, unsigned int);
+ void __iomem *cmu_regs;
};
#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
@@ -68,24 +77,21 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
}
#endif
-#include <plat/cpu.h>
-#include <mach/map.h>
-
-#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200)
-#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400)
+#define EXYNOS4_CLKSRC_CPU 0x14200
+#define EXYNOS4_CLKMUX_STATCPU 0x14400
-#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500)
-#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504)
-#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600)
-#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604)
+#define EXYNOS4_CLKDIV_CPU 0x14500
+#define EXYNOS4_CLKDIV_CPU1 0x14504
+#define EXYNOS4_CLKDIV_STATCPU 0x14600
+#define EXYNOS4_CLKDIV_STATCPU1 0x14604
#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
-#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000)
-#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100)
-#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400)
-#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500)
-#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504)
-#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600)
-#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604)
+#define EXYNOS5_APLL_LOCK 0x00000
+#define EXYNOS5_APLL_CON0 0x00100
+#define EXYNOS5_CLKMUX_STATCPU 0x00400
+#define EXYNOS5_CLKDIV_CPU0 0x00500
+#define EXYNOS5_CLKDIV_CPU1 0x00504
+#define EXYNOS5_CLKDIV_STATCPU0 0x00600
+#define EXYNOS5_CLKDIV_STATCPU1 0x00604
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 6384e5b9a347..61a54310a1b9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -16,6 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos4210_volt_table[] = {
1250000, 1150000, 1050000, 975000, 950000,
@@ -60,20 +63,20 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
tmp = apll_freq_4210[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU);
} while (tmp & 0x1111111);
/* Change Divider - CPU1 */
tmp = apll_freq_4210[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
} while (tmp & 0x11);
}
@@ -85,7 +88,7 @@ static void exynos4210_set_apll(unsigned int index)
clk_set_parent(moutcore, mout_mpll);
do {
- tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU)
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -96,7 +99,7 @@ static void exynos4210_set_apll(unsigned int index)
clk_set_parent(moutcore, mout_apll);
do {
- tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
@@ -115,8 +118,30 @@ static void exynos4210_set_frequency(unsigned int old_index,
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -143,6 +168,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 466c76ad335b..351a2074cfea 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -16,6 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos4x12_volt_table[] = {
1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
@@ -100,28 +103,26 @@ static struct apll_freq apll_freq_4412[] = {
static void exynos4x12_set_clkdiv(unsigned int div_index)
{
unsigned int tmp;
- unsigned int stat_cpu1;
/* Change Divider - CPU0 */
tmp = apll_freq_4x12[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
- while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
+ & 0x11111111)
cpu_relax();
/* Change Divider - CPU1 */
tmp = apll_freq_4x12[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
- if (soc_is_exynos4212())
- stat_cpu1 = 0x11;
- else
- stat_cpu1 = 0x111;
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
- while (__raw_readl(EXYNOS4_CLKDIV_STATCPU1) & stat_cpu1)
+ do {
cpu_relax();
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
+ } while (tmp != 0x0);
}
static void exynos4x12_set_apll(unsigned int index)
@@ -133,7 +134,7 @@ static void exynos4x12_set_apll(unsigned int index)
do {
cpu_relax();
- tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU)
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -145,7 +146,7 @@ static void exynos4x12_set_apll(unsigned int index)
do {
cpu_relax();
- tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
@@ -164,8 +165,30 @@ static void exynos4x12_set_frequency(unsigned int old_index,
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -184,7 +207,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- if (soc_is_exynos4212())
+ if (info->type == EXYNOS_SOC_4212)
apll_freq_4x12 = apll_freq_4212;
else
apll_freq_4x12 = apll_freq_4412;
@@ -197,6 +220,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 363a0b3fe1b1..c91ce69dc631 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -16,8 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
-
-#include <mach/map.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -25,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos5250_volt_table[] = {
1300000, 1250000, 1225000, 1200000, 1150000,
@@ -87,17 +88,18 @@ static void set_clkdiv(unsigned int div_index)
tmp = apll_freq_5250[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
- while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
+ & 0x11111111)
cpu_relax();
/* Change Divider - CPU1 */
tmp = apll_freq_5250[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
- while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
cpu_relax();
}
@@ -111,7 +113,8 @@ static void set_apll(unsigned int index)
do {
cpu_relax();
- tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16);
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
+ >> 16);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -122,7 +125,7 @@ static void set_apll(unsigned int index)
do {
cpu_relax();
- tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
tmp &= (0x7 << 16);
} while (tmp != (0x1 << 16));
}
@@ -141,8 +144,30 @@ static void exynos5250_set_frequency(unsigned int old_index,
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -169,6 +194,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos5250_freq_table;
info->set_freq = exynos5250_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index be1b2b5c9753..227ebf7c1eea 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -141,6 +141,7 @@ static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
{
+ cfg->mpll = _clk_mpll;
(cfg->info->set_fvco)(cfg);
}
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 97ccc31dbdd8..ae1d78ea7df7 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -1,6 +1,11 @@
#
# ARM CPU Idle drivers
#
+config ARM_ARMADA_370_XP_CPUIDLE
+ bool "CPU Idle Driver for Armada 370/XP family processors"
+ depends on ARCH_MVEBU
+ help
+ Select this to enable cpuidle on Armada 370/XP processors.
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
@@ -44,3 +49,9 @@ config ARM_AT91_CPUIDLE
depends on ARCH_AT91
help
Select this to enable cpuidle for AT91 processors
+
+config ARM_EXYNOS_CPUIDLE
+ bool "Cpu Idle Driver for the Exynos processors"
+ depends on ARCH_EXYNOS
+ help
+ Select this to enable cpuidle for Exynos processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index f71ae1b373c5..cd3ab59f8461 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,12 +7,14 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_ARMADA_370_XP_CPUIDLE) += cpuidle-armada-370-xp.o
obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
+obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
###############################################################################
# POWERPC drivers
diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c
new file mode 100644
index 000000000000..28587d0f3947
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-armada-370-xp.c
@@ -0,0 +1,93 @@
+/*
+ * Marvell Armada 370 and Armada XP SoC cpuidle driver
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <asm/cpuidle.h>
+
+#define ARMADA_370_XP_MAX_STATES 3
+#define ARMADA_370_XP_FLAG_DEEP_IDLE 0x10000
+
+static int (*armada_370_xp_cpu_suspend)(int);
+
+static int armada_370_xp_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int ret;
+ bool deepidle = false;
+ cpu_pm_enter();
+
+ if (drv->states[index].flags & ARMADA_370_XP_FLAG_DEEP_IDLE)
+ deepidle = true;
+
+ ret = armada_370_xp_cpu_suspend(deepidle);
+ if (ret)
+ return ret;
+
+ cpu_pm_exit();
+
+ return index;
+}
+
+static struct cpuidle_driver armada_370_xp_idle_driver = {
+ .name = "armada_370_xp_idle",
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = armada_370_xp_enter_idle,
+ .exit_latency = 10,
+ .power_usage = 50,
+ .target_residency = 100,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "MV CPU IDLE",
+ .desc = "CPU power down",
+ },
+ .states[2] = {
+ .enter = armada_370_xp_enter_idle,
+ .exit_latency = 100,
+ .power_usage = 5,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ ARMADA_370_XP_FLAG_DEEP_IDLE,
+ .name = "MV CPU DEEP IDLE",
+ .desc = "CPU and L2 Fabric power down",
+ },
+ .state_count = ARMADA_370_XP_MAX_STATES,
+};
+
+static int armada_370_xp_cpuidle_probe(struct platform_device *pdev)
+{
+ armada_370_xp_cpu_suspend = (void *)(pdev->dev.platform_data);
+ return cpuidle_register(&armada_370_xp_idle_driver, NULL);
+}
+
+static struct platform_driver armada_370_xp_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-armada-370-xp",
+ .owner = THIS_MODULE,
+ },
+ .probe = armada_370_xp_cpuidle_probe,
+};
+
+module_platform_driver(armada_370_xp_cpuidle_plat_driver);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 370/XP cpu idle driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
new file mode 100644
index 000000000000..7c0151263828
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -0,0 +1,99 @@
+/* linux/arch/arm/mach-exynos/cpuidle.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/cpuidle.h>
+
+static void (*exynos_enter_aftr)(void);
+
+static int idle_finisher(unsigned long flags)
+{
+ exynos_enter_aftr();
+ cpu_do_idle();
+
+ return 1;
+}
+
+static int exynos_enter_core0_aftr(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ cpu_pm_enter();
+ cpu_suspend(0, idle_finisher);
+ cpu_pm_exit();
+
+ return index;
+}
+
+static int exynos_enter_lowpower(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int new_index = index;
+
+ /* AFTR can only be entered when cores other than CPU0 are offline */
+ if (num_online_cpus() > 1 || dev->cpu != 0)
+ new_index = drv->safe_state_index;
+
+ if (new_index == 0)
+ return arm_cpuidle_simple_enter(dev, drv, new_index);
+ else
+ return exynos_enter_core0_aftr(dev, drv, new_index);
+}
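
The demotion above is the usual cpuidle idiom for constrained states: if the entry condition is not met (here, AFTR requires CPU0 to be the only CPU running), fall back to drv->safe_state_index rather than fail. A reduced ARM-flavoured sketch, assuming the arm_cpuidle_simple_enter() helper used above:

#include <linux/cpuidle.h>
#include <linux/cpumask.h>

#include <asm/cpuidle.h>

static int demo_enter_deep(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	/* The deep state is only valid on the last running CPU. */
	if (num_online_cpus() > 1 || dev->cpu != 0)
		index = drv->safe_state_index;

	if (index == 0)
		return arm_cpuidle_simple_enter(dev, drv, index);

	/* ... enter the deep state and return its index ... */
	return index;
}
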
+
+static struct cpuidle_driver exynos_idle_driver = {
+ .name = "exynos_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ [0] = ARM_CPUIDLE_WFI_STATE,
+ [1] = {
+ .enter = exynos_enter_lowpower,
+ .exit_latency = 300,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+ },
+ .state_count = 2,
+ .safe_state_index = 0,
+};
+
+static int exynos_cpuidle_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ exynos_enter_aftr = (void *)(pdev->dev.platform_data);
+
+ ret = cpuidle_register(&exynos_idle_driver, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver exynos_cpuidle_driver = {
+ .probe = exynos_cpuidle_probe,
+ .driver = {
+ .name = "exynos_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_BIDIRECTIONAL);
}
+ cnt = unmap->map_cnt;
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
memset(unmap, 0, sizeof(*unmap));
kref_init(&unmap->kref);
unmap->dev = dev;
+ unmap->map_cnt = nr;
return unmap;
}
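
These two hunks pair up: dmaengine_get_unmap_data() now records the requested entry count in map_cnt, and dmaengine_unmap() reloads it so mempool_free() returns the object to the size-class pool it was allocated from; the old code keyed the free off a cnt that the unmap loop had meanwhile consumed. A reduced sketch of the record-capacity-at-alloc pattern (pool indexing is hypothetical):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mempool.h>

static mempool_t *demo_pools[4];	/* size-class pools, like __get_unmap_pool() */

static mempool_t *demo_pool_for(int nr)
{
	return demo_pools[min(fls(nr), 3)];
}

struct demo_unmap {
	int map_cnt;	/* capacity recorded at allocation time */
};

static void demo_free(struct demo_unmap *u)
{
	/* Key the free on the recorded capacity, never on a live counter. */
	mempool_free(u, demo_pool_for(u->map_cnt));
}
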
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
- IRQF_SHARED, "dw_dmac", dw);
- if (err)
- return err;
-
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+ err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+ "dw_dmac", dw);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
+ free_irq(chip->irq, dw);
tasklet_kill(&dw->tasklet);
list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
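
The reordering in dw_dma_probe() matters for shared interrupt lines: with IRQF_SHARED the handler may run as soon as request_irq() returns, because another device on the line can already be asserting it, so the tasklet it schedules must be initialized first; symmetrically, remove frees the IRQ before killing the tasklet so it cannot be re-queued. A lifecycle sketch with hypothetical names:

#include <linux/interrupt.h>

static struct tasklet_struct demo_tasklet;

static void demo_work(unsigned long data)
{
	/* deferred half of the interrupt handling */
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	tasklet_schedule(&demo_tasklet);	/* must already be valid */
	return IRQ_HANDLED;
}

static int demo_init(int irq)
{
	tasklet_init(&demo_tasklet, demo_work, 0);	/* before the IRQ */
	return request_irq(irq, demo_isr, IRQF_SHARED, "demo",
			   &demo_tasklet);
}

static void demo_exit(int irq)
{
	free_irq(irq, &demo_tasklet);	/* no new scheduling after this */
	tasklet_kill(&demo_tasklet);
}
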
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6a..d08c4dedef35 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
+struct edma_pset {
+ u32 len;
+ dma_addr_t addr;
+ struct edmacc_param param;
+};
+
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
+ enum dma_transfer_direction direction;
int cyclic;
int absync;
int pset_nr;
+ struct edma_chan *echan;
int processed;
- struct edmacc_param pset[0];
+
+ /*
+ * The following 4 elements are used for residue accounting.
+ *
+ * - processed_stat: the number of SG elements we have traversed
+ * so far to cover accounting. This is updated directly to processed
+ * during edma_callback and is always <= processed, because processed
+ * refers to the number of pending transfers (programmed to the EDMA
+ * controller), whereas processed_stat tracks the number of transfers
+ * accounted for so far.
+ *
+ * - residue: The number of bytes we have left to transfer for this desc
+ *
+ * - residue_stat: The residue in bytes of data we have covered
+ * so far for accounting. This is updated directly to residue
+ * during callbacks to keep it current.
+ *
+ * - sg_len: Tracks the length of the current intermediate transfer,
+ * this is required to update the residue during intermediate transfer
+ * completion callback.
+ */
+ int processed_stat;
+ u32 sg_len;
+ u32 residue;
+ u32 residue_stat;
+
+ struct edma_pset pset[0];
};
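
A worked example of how these four fields interact, under assumed numbers: with four SG elements of 1024 bytes each and MAX_NR_SG taken as 2, prep sets residue = residue_stat = 4096. The first batch programs elements 0 and 1, so sg_len = 2048; on that intermediate completion the callback drops residue to 2048, copies it into residue_stat, and advances processed_stat to processed = 2. If tx_status is queried mid-batch, edma_residue() then walks forward from processed_stat and subtracts the in-flight delta from residue_stat.
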
struct edma_cc;
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
/* Find out how many left */
left = edesc->pset_nr - edesc->processed;
nslots = min(MAX_NR_SG, left);
+ edesc->sg_len = 0;
/* Write descriptor PaRAM set(s) */
for (i = 0; i < nslots; i++) {
j = i + edesc->processed;
- edma_write_slot(echan->slot[i], &edesc->pset[j]);
- dev_dbg(echan->vchan.chan.device->dev,
+ edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+ edesc->sg_len += edesc->pset[j].len;
+ dev_vdbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
" cidx\t%08x\n"
" lkrld\t%08x\n",
j, echan->ch_num, echan->slot[i],
- edesc->pset[j].opt,
- edesc->pset[j].src,
- edesc->pset[j].dst,
- edesc->pset[j].a_b_cnt,
- edesc->pset[j].ccnt,
- edesc->pset[j].src_dst_bidx,
- edesc->pset[j].src_dst_cidx,
- edesc->pset[j].link_bcntrld);
+ edesc->pset[j].param.opt,
+ edesc->pset[j].param.src,
+ edesc->pset[j].param.dst,
+ edesc->pset[j].param.a_b_cnt,
+ edesc->pset[j].param.ccnt,
+ edesc->pset[j].param.src_dst_bidx,
+ edesc->pset[j].param.src_dst_cidx,
+ edesc->pset[j].param.link_bcntrld);
/* Link to the previous slot if not the last set */
if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
}
if (edesc->processed <= MAX_NR_SG) {
- dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ dev_dbg(dev, "first transfer starting on channel %d\n",
+ echan->ch_num);
edma_start(echan->ch_num);
} else {
dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
* MAX_NR_SG
*/
if (echan->missed) {
- dev_dbg(dev, "missed event in execute detected\n");
+ dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
edma_clean_channel(echan->ch_num);
edma_stop(echan->ch_num);
edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
return 0;
}
+static int edma_dma_pause(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_pause(echan->ch_num);
+ return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_resume(echan->ch_num);
+ return 0;
+}
+
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
config = (struct dma_slave_config *)arg;
ret = edma_slave_config(echan, config);
break;
+ case DMA_PAUSE:
+ ret = edma_dma_pause(echan);
+ break;
+
+ case DMA_RESUME:
+ ret = edma_dma_resume(echan);
+ break;
+
default:
ret = -ENOSYS;
}
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* @dma_length: Total length of the DMA transfer
* @direction: Direction of the transfer
*/
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
enum dma_slave_buswidth dev_width, unsigned int dma_length,
enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
+ struct edmacc_param *param = &epset->param;
int acnt, bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
acnt = dev_width;
+
+ /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+ if (!burst)
+ burst = 1;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
cidx = acnt * bcnt;
}
+ epset->len = dma_length;
+
if (direction == DMA_MEM_TO_DEV) {
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
+ epset->addr = src_addr;
} else if (direction == DMA_DEV_TO_MEM) {
src_bidx = 0;
src_cidx = 0;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = dst_addr;
+ } else if (direction == DMA_MEM_TO_MEM) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
}
- pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
/* Configure A or AB synchronized transfers */
if (absync)
- pset->opt |= SYNCDIM;
+ param->opt |= SYNCDIM;
- pset->src = src_addr;
- pset->dst = dst_addr;
+ param->src = src_addr;
+ param->dst = dst_addr;
- pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
- pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+ param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
- pset->a_b_cnt = bcnt << 16 | acnt;
- pset->ccnt = ccnt;
+ param->a_b_cnt = bcnt << 16 | acnt;
+ param->ccnt = ccnt;
/*
* Only time when (bcntrld) auto reload is required is for
* A-sync case, and in this case, a requirement of reload value
* of SZ_64K-1 only is assured. 'link' is initially set to NULL
* and then later will be populated by edma_execute.
*/
- pset->link_bcntrld = 0xffffffff;
+ param->link_bcntrld = 0xffffffff;
return absync;
}
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->pset_nr = sg_len;
+ edesc->residue = 0;
+ edesc->direction = direction;
+ edesc->echan = echan;
/* Allocate a PaRAM slot, if needed */
nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
- dev_err(dev, "Failed to allocate slot\n");
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
edesc->absync = ret;
+ edesc->residue += sg_dma_len(sg);
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
if (!((i+1) % MAX_NR_SG))
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ edesc->residue_stat = edesc->residue;
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ int ret;
+ struct edma_desc *edesc;
+ struct device *dev = chan->device->dev;
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ if (unlikely(!echan || !len))
+ return NULL;
+
+ edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->pset_nr = 1;
+
+ ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+ DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+ if (ret < 0)
+ return NULL;
+
+ edesc->absync = ret;
+
+ /*
+ * Enable intermediate transfer chaining to re-trigger channel
+ * on completion of every TR, and enable transfer-completion
+ * interrupt on completion of the whole transfer.
+ */
+ edesc->pset[0].param.opt |= ITCCHEN;
+ edesc->pset[0].param.opt |= TCINTEN;
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
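
From a client's point of view the new capability is reached through the generic prep hook registered below in edma_dma_init(). A hedged usage sketch, calling the device operation directly as clients of this kernel generation would; src/dst are assumed already DMA-mapped and aligned to dma->copy_align:

#include <linux/dmaengine.h>

static int demo_memcpy(struct dma_chan *chan, dma_addr_t dst,
		       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
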
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc = kzalloc(sizeof(*edesc) + nslots *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->cyclic = 1;
edesc->pset_nr = nslots;
+ edesc->residue = edesc->residue_stat = buf_len;
+ edesc->direction = direction;
+ edesc->echan = echan;
- dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
- dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
- dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+ dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+ __func__, echan->ch_num, nslots, period_len, buf_len);
for (i = 0; i < nslots; i++) {
/* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
- dev_err(dev, "Failed to allocate slot\n");
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
else
src_addr += period_len;
- dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
- dev_dbg(dev,
+ dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+ dev_vdbg(dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
" cidx\t%08x\n"
" lkrld\t%08x\n",
i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ edesc->pset[i].param.opt,
+ edesc->pset[i].param.src,
+ edesc->pset[i].param.dst,
+ edesc->pset[i].param.a_b_cnt,
+ edesc->pset[i].param.ccnt,
+ edesc->pset[i].param.src_dst_bidx,
+ edesc->pset[i].param.src_dst_cidx,
+ edesc->pset[i].param.link_bcntrld);
edesc->absync = ret;
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* Enable interrupts for every period because callback
* has to be called for every period.
*/
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct edma_chan *echan = data;
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
- unsigned long flags;
struct edmacc_param p;
edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
switch (ch_status) {
case EDMA_DMA_COMPLETE:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
if (edesc) {
if (edesc->cyclic) {
vchan_cyclic_callback(&edesc->vdesc);
} else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edesc->residue = 0;
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+
edma_execute(echan);
}
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
case EDMA_DMA_CC_ERROR:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
edma_trigger_channel(echan->ch_num);
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
echan->alloced = true;
echan->slot[0] = echan->ch_num;
- dev_dbg(dev, "allocated channel for %u:%u\n",
+ dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
-static size_t edma_desc_size(struct edma_desc *edesc)
+static u32 edma_residue(struct edma_desc *edesc)
{
+ bool dst = edesc->direction == DMA_DEV_TO_MEM;
+ struct edma_pset *pset = edesc->pset;
+ dma_addr_t done, pos;
int i;
- size_t size;
-
- if (edesc->absync)
- for (size = i = 0; i < edesc->pset_nr; i++)
- size += (edesc->pset[i].a_b_cnt & 0xffff) *
- (edesc->pset[i].a_b_cnt >> 16) *
- edesc->pset[i].ccnt;
- else
- size = (edesc->pset[0].a_b_cnt & 0xffff) *
- (edesc->pset[0].a_b_cnt >> 16) +
- (edesc->pset[0].a_b_cnt & 0xffff) *
- (SZ_64K - 1) * edesc->pset[0].ccnt;
-
- return size;
+
+ /*
+ * We always read the dst/src position from the first PaRAM
+ * set. That's the one which is active now.
+ */
+ pos = edma_get_position(edesc->echan->slot[0], dst);
+
+ /*
+ * Cyclic is simple. Just subtract pset[0].addr from pos.
+ *
+ * We never update edesc->residue in the cyclic case, so we
+ * can tell the remaining room to the end of the circular
+ * buffer.
+ */
+ if (edesc->cyclic) {
+ done = pos - pset->addr;
+ edesc->residue_stat = edesc->residue - done;
+ return edesc->residue_stat;
+ }
+
+ /*
+ * For SG operation we catch up with the last processed
+ * status.
+ */
+ pset += edesc->processed_stat;
+
+ for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+ /*
+ * If we are inside this pset address range, we know
+ * this is the active one. Get the current delta and
+ * stop walking the psets.
+ */
+ if (pos >= pset->addr && pos < pset->addr + pset->len)
+ return edesc->residue_stat - (pos - pset->addr);
+
+ /* Otherwise mark it done and update residue_stat. */
+ edesc->processed_stat++;
+ edesc->residue_stat -= pset->len;
+ }
+ return edesc->residue_stat;
}
/* Check request completion status */
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
- vdesc = vchan_find_desc(&echan->vchan, cookie);
- if (vdesc) {
- txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
- } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
- struct edma_desc *edesc = echan->edesc;
- txstate->residue = edma_desc_size(edesc);
- }
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ txstate->residue = edma_residue(echan->edesc);
+ else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
}
}
+#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int edma_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ return 0;
+}
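
On the consumer side these capabilities are discoverable before a channel is configured; a short sketch using the dma_get_slave_caps() helper that reads back the structure filled in above:

#include <linux/dmaengine.h>

static bool demo_chan_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;	/* driver does not report capabilities */

	return caps.cmd_pause;
}
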
+
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+ dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
dma->device_tx_status = edma_tx_status;
dma->device_control = edma_control;
+ dma->device_slave_caps = edma_dma_device_slave_caps;
dma->dev = dev;
+ /*
+ * Code using the DMA memcpy API must make sure the transfer
+ * length is aligned to the dma->copy_align boundary.
+ */
+ dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
INIT_LIST_HEAD(&dma->channels);
}
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
dma_cap_zero(ecc->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
-
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = readl_relaxed(XOR_ACTIVATION(chan));
- activation |= 0x1;
- writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
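
The writel_relaxed() → writel() change is the substance of this hunk: on ARM, writel() places a write barrier before the MMIO store, so the descriptor writes sitting in normal memory are visible to the XOR engine before the activation bit arrives; the relaxed read-modify-write it replaces gave no such guarantee. The ordering rule in miniature (hypothetical device):

#include <linux/bitops.h>
#include <linux/io.h>

/*
 * Sketch only: build a descriptor in coherent memory, then ring the
 * doorbell. writel() is a barrier plus a store, so the device cannot
 * observe the doorbell before the descriptor contents.
 */
static void demo_kick(void __iomem *doorbell, u32 *desc, u32 val)
{
	desc[0] = val;			/* plain memory write */
	writel(BIT(0), doorbell);	/* wmb(), then MMIO store */
}
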
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
struct sa11x0_dma_desc *txd_done;
-#ifdef CONFIG_PM_SLEEP
u32 dbs[2];
u32 dbt[2];
u32 dcsr;
-#endif
};
struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
.suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 33edd6766344..2c694b5297cc 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1018,7 +1018,7 @@ static void edac_ce_error(struct mem_ctl_info *mci,
}
edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
- if (mci->scrub_mode & SCRUB_SW_SRC) {
+ if (mci->scrub_mode == SCRUB_SW_SRC) {
/*
* Some memory controllers (called MCs below) can remap
* memory so that it is still available at a different
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 8d0450b9b9af..64b68320249f 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -275,7 +275,6 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
{
struct pci_dev *dev;
void __iomem *window;
- int err;
*ovrfl_pdev = NULL;
*ovrfl_window = NULL;
@@ -293,13 +292,8 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
if (dev == NULL)
return 1;
- err = pci_bus_add_device(dev);
- if (err) {
- i82875p_printk(KERN_ERR,
- "%s(): pci_bus_add_device() Failed\n",
- __func__);
- }
pci_bus_assign_resources(dev->bus);
+ pci_bus_add_device(dev);
}
*ovrfl_pdev = dev;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 51b9caa0b024..5f43620d580a 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -6,7 +6,6 @@
static struct amd_decoder_ops *fam_ops;
static u8 xec_mask = 0xf;
-static u8 nb_err_cpumask = 0xf;
static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct mce *m);
@@ -852,7 +851,6 @@ static int __init mce_amd_init(void)
break;
case 0x14:
- nb_err_cpumask = 0x3;
fam_ops->mc0_mce = cat_mc0_mce;
fam_ops->mc1_mce = cat_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index be56e8ac95e6..aebde489c291 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -28,13 +28,13 @@ config EXTCON_ADC_JACK
Say Y here to enable extcon device driver based on ADC values.
config EXTCON_MAX14577
- tristate "MAX14577 EXTCON Support"
+ tristate "MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
select IRQ_DOMAIN
select REGMAP_I2C
help
If you say yes here you get support for the MUIC device of
- Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory
+ Maxim MAX14577/77836. The MAX14577/77836 MUIC is a USB port accessory
detector and switch.
config EXTCON_MAX77693
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index e23f1c2e5053..e18f95be3733 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -39,7 +39,7 @@
* @chan: iio channel being queried.
*/
struct adc_jack_data {
- struct extcon_dev edev;
+ struct extcon_dev *edev;
const char **cable_names;
int num_cables;
@@ -64,7 +64,7 @@ static void adc_jack_handler(struct work_struct *work)
ret = iio_read_channel_raw(data->chan, &adc_val);
if (ret < 0) {
- dev_err(&data->edev.dev, "read channel() error: %d\n", ret);
+ dev_err(&data->edev->dev, "read channel() error: %d\n", ret);
return;
}
@@ -80,7 +80,7 @@ static void adc_jack_handler(struct work_struct *work)
}
/* if no def has met, it means state = 0 (no cables attached) */
- extcon_set_state(&data->edev, state);
+ extcon_set_state(data->edev, state);
}
static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
@@ -102,33 +102,33 @@ static int adc_jack_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->edev.name = pdata->name;
-
if (!pdata->cable_names) {
- err = -EINVAL;
dev_err(&pdev->dev, "error: cable_names not defined.\n");
- goto out;
+ return -EINVAL;
}
- data->edev.dev.parent = &pdev->dev;
- data->edev.supported_cable = pdata->cable_names;
+ data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
+ if (IS_ERR(data->edev)) {
+ dev_err(&pdev->dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+ data->edev->dev.parent = &pdev->dev;
+ data->edev->name = pdata->name;
/* Check the length of array and set num_cables */
- for (i = 0; data->edev.supported_cable[i]; i++)
+ for (i = 0; data->edev->supported_cable[i]; i++)
;
if (i == 0 || i > SUPPORTED_CABLE_MAX) {
- err = -EINVAL;
dev_err(&pdev->dev, "error: pdata->cable_names size = %d\n",
i - 1);
- goto out;
+ return -EINVAL;
}
data->num_cables = i;
if (!pdata->adc_conditions ||
!pdata->adc_conditions[0].state) {
- err = -EINVAL;
dev_err(&pdev->dev, "error: adc_conditions not defined.\n");
- goto out;
+ return -EINVAL;
}
data->adc_conditions = pdata->adc_conditions;
@@ -138,10 +138,8 @@ static int adc_jack_probe(struct platform_device *pdev)
data->num_conditions = i;
data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
- if (IS_ERR(data->chan)) {
- err = PTR_ERR(data->chan);
- goto out;
- }
+ if (IS_ERR(data->chan))
+ return PTR_ERR(data->chan);
data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
@@ -149,15 +147,14 @@ static int adc_jack_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- err = extcon_dev_register(&data->edev);
+ err = devm_extcon_dev_register(&pdev->dev, data->edev);
if (err)
- goto out;
+ return err;
data->irq = platform_get_irq(pdev, 0);
if (!data->irq) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- err = -ENODEV;
- goto err_irq;
+ return -ENODEV;
}
err = request_any_context_irq(data->irq, adc_jack_irq_thread,
@@ -165,15 +162,10 @@ static int adc_jack_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "error: irq %d\n", data->irq);
- goto err_irq;
+ return err;
}
return 0;
-
-err_irq:
- extcon_dev_unregister(&data->edev);
-out:
- return err;
}
static int adc_jack_remove(struct platform_device *pdev)
@@ -182,7 +174,6 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
- extcon_dev_unregister(&data->edev);
return 0;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 98a14f6143a7..6c84e3d12043 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -91,7 +91,7 @@ struct arizona_extcon_info {
int hpdet_ip;
- struct extcon_dev edev;
+ struct extcon_dev *edev;
};
static const struct arizona_micd_config micd_default_modes[] = {
@@ -546,7 +546,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -581,7 +581,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
else
report = ARIZONA_CABLE_HEADPHONE;
- ret = extcon_set_cable_state_(&info->edev, report, true);
+ ret = extcon_set_cable_state_(info->edev, report, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report HP/line: %d\n",
ret);
@@ -664,7 +664,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_update_state(&info->edev,
+ ret = extcon_update_state(info->edev,
1 << ARIZONA_CABLE_HEADPHONE,
1 << ARIZONA_CABLE_HEADPHONE);
if (ret != 0)
@@ -723,7 +723,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_update_state(&info->edev,
+ ret = extcon_update_state(info->edev,
1 << ARIZONA_CABLE_HEADPHONE,
1 << ARIZONA_CABLE_HEADPHONE);
if (ret != 0)
@@ -764,7 +764,7 @@ static void arizona_micd_detect(struct work_struct *work)
mutex_lock(&info->lock);
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -812,7 +812,7 @@ static void arizona_micd_detect(struct work_struct *work)
if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
arizona_identify_headphone(info);
- ret = extcon_update_state(&info->edev,
+ ret = extcon_update_state(info->edev,
1 << ARIZONA_CABLE_MICROPHONE,
1 << ARIZONA_CABLE_MICROPHONE);
@@ -999,7 +999,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (info->last_jackdet == present) {
dev_dbg(arizona->dev, "Detected jack\n");
- ret = extcon_set_cable_state_(&info->edev,
+ ret = extcon_set_cable_state_(info->edev,
ARIZONA_CABLE_MECHANICAL, true);
if (ret != 0)
@@ -1038,7 +1038,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
info->micd_ranges[i].key, 0);
input_sync(info->input);
- ret = extcon_update_state(&info->edev, 0xffffffff, 0);
+ ret = extcon_update_state(info->edev, 0xffffffff, 0);
if (ret != 0)
dev_err(arizona->dev, "Removal report failed: %d\n",
ret);
@@ -1105,15 +1105,14 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
info->micvdd = devm_regulator_get(arizona->dev, "MICVDD");
if (IS_ERR(info->micvdd)) {
ret = PTR_ERR(info->micvdd);
dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret);
- goto err;
+ return ret;
}
mutex_init(&info->lock);
@@ -1151,15 +1150,19 @@ static int arizona_extcon_probe(struct platform_device *pdev)
break;
}
- info->edev.name = "Headset Jack";
- info->edev.dev.parent = arizona->dev;
- info->edev.supported_cable = arizona_cable;
+ info->edev = devm_extcon_dev_allocate(&pdev->dev, arizona_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(&pdev->dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+ info->edev->name = "Headset Jack";
+ info->edev->dev.parent = arizona->dev;
- ret = extcon_dev_register(&info->edev);
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret < 0) {
dev_err(arizona->dev, "extcon_dev_register() failed: %d\n",
ret);
- goto err;
+ return ret;
}
info->input = devm_input_allocate_device(&pdev->dev);
@@ -1410,8 +1413,6 @@ err_rise:
err_input:
err_register:
pm_runtime_disable(&pdev->dev);
- extcon_dev_unregister(&info->edev);
-err:
return ret;
}
@@ -1445,7 +1446,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
ARIZONA_JD1_ENA, 0);
arizona_clk32k_disable(arizona);
- extcon_dev_unregister(&info->edev);
return 0;
}
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 7ab21aa6eaa1..18d42c0e4581 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -565,6 +565,100 @@ static void dummy_sysfs_dev_release(struct device *dev)
{
}
+/*
+ * extcon_dev_allocate() - Allocate the memory of extcon device.
+ * @supported_cable: Array of supported cable names ending with NULL.
+ * If supported_cable is NULL, cable name related APIs
+ * are disabled.
+ *
+ * This function allocates the memory for the extcon device without requiring
+ * each extcon provider driver to allocate it, and initializes the default
+ * settings for the extcon device.
+ *
+ * Returns the pointer to the extcon device on success, or ERR_PTR(err) on failure.
+ */
+struct extcon_dev *extcon_dev_allocate(const char **supported_cable)
+{
+ struct extcon_dev *edev;
+
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+
+ edev->max_supported = 0;
+ edev->supported_cable = supported_cable;
+
+ return edev;
+}
+
+/*
+ * extcon_dev_free() - Free the memory of extcon device.
+ * @edev: the extcon device to free
+ */
+void extcon_dev_free(struct extcon_dev *edev)
+{
+ kfree(edev);
+}
+EXPORT_SYMBOL_GPL(extcon_dev_free);
+
+static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
+{
+ struct extcon_dev **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+static void devm_extcon_dev_release(struct device *dev, void *res)
+{
+ extcon_dev_free(*(struct extcon_dev **)res);
+}
+
+/**
+ * devm_extcon_dev_allocate - Allocate managed extcon device
+ * @dev: device owning the extcon device being created
+ * @supported_cable: Array of supported cable names ending with NULL.
+ * If supported_cable is NULL, cable name related APIs
+ * are disabled.
+ *
+ * This function automatically manages the memory of the extcon device using
+ * device resource management, which simplifies freeing that memory.
+ *
+ * Returns the pointer to the allocated extcon_dev on success,
+ * or ERR_PTR(err) on failure.
+ */
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const char **supported_cable)
+{
+ struct extcon_dev **ptr, *edev;
+
+ ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ edev = extcon_dev_allocate(supported_cable);
+ if (IS_ERR(edev)) {
+ devres_free(ptr);
+ return edev;
+ }
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
+
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_release,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
+
/**
* extcon_dev_register() - Register a new extcon device
* @edev : the new extcon device (should be allocated before calling)
@@ -819,6 +913,63 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
+static void devm_extcon_dev_unreg(struct device *dev, void *res)
+{
+ extcon_dev_unregister(*(struct extcon_dev **)res);
+}
+
+/**
+ * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
+ * @dev: device to allocate extcon device
+ * @edev: the new extcon device to register
+ *
+ * Managed extcon_dev_register() function. If an extcon device is registered
+ * with this function, it is automatically unregistered on driver detach.
+ * Internally this function calls extcon_dev_register(); refer to that
+ * function for more information.
+ *
+ * If an extcon device registered with this function needs to be unregistered
+ * separately, devm_extcon_dev_unregister() should be used.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
+{
+ struct extcon_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = extcon_dev_register(edev);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
+
+/**
+ * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to unregister
+ *
+ * Unregister extcon device that is registered with devm_extcon_dev_register()
+ * function.
+ */
+void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
+
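The provider conversions later in this series all follow the same shape enabled by these helpers. A condensed sketch of a probe function using the managed API (the "demo" names and cable list are hypothetical):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static const char *demo_cables[] = { "USB", "USB-HOST", NULL };

static int demo_extcon_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	/* Memory is bound to pdev->dev; no explicit extcon_dev_free() needed. */
	edev = devm_extcon_dev_allocate(&pdev->dev, demo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	edev->name = dev_name(&pdev->dev);
	edev->dev.parent = &pdev->dev;

	/* Unregistered automatically on driver detach. */
	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, edev);
	return 0;
}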
#ifdef CONFIG_OF
/*
* extcon_get_edev_by_phandle - Get the extcon device from devicetree
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 13d522255d81..645b28356819 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -32,7 +32,7 @@
#include <linux/extcon/extcon-gpio.h>
struct gpio_extcon_data {
- struct extcon_dev edev;
+ struct extcon_dev *edev;
unsigned gpio;
bool gpio_active_low;
const char *state_on;
@@ -53,7 +53,7 @@ static void gpio_extcon_work(struct work_struct *work)
state = gpio_get_value(data->gpio);
if (data->gpio_active_low)
state = !state;
- extcon_set_state(&data->edev, state);
+ extcon_set_state(data->edev, state);
}
static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
@@ -67,9 +67,10 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
{
- struct gpio_extcon_data *extcon_data =
- container_of(edev, struct gpio_extcon_data, edev);
+ struct device *dev = edev->dev.parent;
+ struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev);
const char *state;
+
if (extcon_get_state(edev))
state = extcon_data->state_on;
else
@@ -98,15 +99,21 @@ static int gpio_extcon_probe(struct platform_device *pdev)
if (!extcon_data)
return -ENOMEM;
- extcon_data->edev.name = pdata->name;
- extcon_data->edev.dev.parent = &pdev->dev;
+ extcon_data->edev = devm_extcon_dev_allocate(&pdev->dev, NULL);
+ if (IS_ERR(extcon_data->edev)) {
+ dev_err(&pdev->dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+ extcon_data->edev->name = pdata->name;
+ extcon_data->edev->dev.parent = &pdev->dev;
+
extcon_data->gpio = pdata->gpio;
extcon_data->gpio_active_low = pdata->gpio_active_low;
extcon_data->state_on = pdata->state_on;
extcon_data->state_off = pdata->state_off;
extcon_data->check_on_resume = pdata->check_on_resume;
if (pdata->state_on && pdata->state_off)
- extcon_data->edev.print_state = extcon_gpio_print_state;
+ extcon_data->edev->print_state = extcon_gpio_print_state;
ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
pdev->name);
@@ -121,34 +128,27 @@ static int gpio_extcon_probe(struct platform_device *pdev)
msecs_to_jiffies(pdata->debounce);
}
- ret = extcon_dev_register(&extcon_data->edev);
+ ret = devm_extcon_dev_register(&pdev->dev, extcon_data->edev);
if (ret < 0)
return ret;
INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work);
extcon_data->irq = gpio_to_irq(extcon_data->gpio);
- if (extcon_data->irq < 0) {
- ret = extcon_data->irq;
- goto err;
- }
+ if (extcon_data->irq < 0)
+ return extcon_data->irq;
ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler,
pdata->irq_flags, pdev->name,
extcon_data);
if (ret < 0)
- goto err;
+ return ret;
platform_set_drvdata(pdev, extcon_data);
/* Perform initial detection */
gpio_extcon_work(&extcon_data->work.work);
return 0;
-
-err:
- extcon_dev_unregister(&extcon_data->edev);
-
- return ret;
}
static int gpio_extcon_remove(struct platform_device *pdev)
@@ -157,7 +157,6 @@ static int gpio_extcon_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&extcon_data->work);
free_irq(extcon_data->irq, extcon_data);
- extcon_dev_unregister(&extcon_data->edev);
return 0;
}
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 3846941801b8..d49e891b5675 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,8 +1,9 @@
/*
- * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC
+ * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
*
- * Copyright (C) 2013 Samsung Electronics
+ * Copyright (C) 2013,2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +25,6 @@
#include <linux/mfd/max14577-private.h>
#include <linux/extcon.h>
-#define DEV_NAME "max14577-muic"
#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
enum max14577_muic_adc_debounce_time {
@@ -40,6 +40,42 @@ enum max14577_muic_status {
MAX14577_MUIC_STATUS_END,
};
+/**
+ * struct max14577_muic_irq
+ * @irq: the index in the irq list of the MUIC device.
+ * @name: the name of the irq.
+ * @virq: the virtual irq used by the irq domain
+ */
+struct max14577_muic_irq {
+ unsigned int irq;
+ const char *name;
+ unsigned int virq;
+};
+
+static struct max14577_muic_irq max14577_muic_irqs[] = {
+ { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
+ { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
+ { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
+ { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
+ { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
+ { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
+ { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
+ { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
+};
+
+static struct max14577_muic_irq max77836_muic_irqs[] = {
+ { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
+ { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
+ { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
+ { MAX77836_IRQ_INT1_ADC1K, "muic-ADC1K" },
+ { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
+ { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
+ { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
+ { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
+ { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
+ { MAX77836_IRQ_INT2_VIDRM, "muic-VIDRM" },
+};
+
struct max14577_muic_info {
struct device *dev;
struct max14577 *max14577;
@@ -48,6 +84,8 @@ struct max14577_muic_info {
int prev_chg_type;
u8 status[MAX14577_MUIC_STATUS_END];
+ struct max14577_muic_irq *muic_irqs;
+ unsigned int muic_irqs_num;
bool irq_adc;
bool irq_chg;
struct work_struct irq_work;
@@ -74,29 +112,6 @@ enum max14577_muic_cable_group {
MAX14577_CABLE_GROUP_CHG,
};
-/**
- * struct max14577_muic_irq
- * @irq: the index of irq list of MUIC device.
- * @name: the name of irq.
- * @virq: the virtual irq to use irq domain
- */
-struct max14577_muic_irq {
- unsigned int irq;
- const char *name;
- unsigned int virq;
-};
-
-static struct max14577_muic_irq muic_irqs[] = {
- { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
- { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
- { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
- { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
- { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
- { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
- { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
- { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
-};
-
/* Define supported accessory type */
enum max14577_muic_acc_type {
MAX14577_MUIC_ADC_GROUND = 0x0,
@@ -528,21 +543,12 @@ static void max14577_muic_irq_work(struct work_struct *work)
return;
}
-static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
+/*
+ * Sets irq_adc or irq_chg in max14577_muic_info and returns 1.
+ * Returns 0 if irq_type does not match a registered IRQ for this device type.
+ */
+static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
{
- struct max14577_muic_info *info = data;
- int i, irq_type = -1;
-
- /*
- * We may be called multiple times for different nested IRQ-s.
- * Including changes in INT1_ADC and INT2_CGHTYP at once.
- * However we only need to know whether it was ADC, charger
- * or both interrupts so decode IRQ and turn on proper flags.
- */
- for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
- if (irq == muic_irqs[i].virq)
- irq_type = muic_irqs[i].irq;
-
switch (irq_type) {
case MAX14577_IRQ_INT1_ADC:
case MAX14577_IRQ_INT1_ADCLOW:
@@ -550,7 +556,7 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
/* Handle all of accessory except for
type of charger accessory */
info->irq_adc = true;
- break;
+ return 1;
case MAX14577_IRQ_INT2_CHGTYP:
case MAX14577_IRQ_INT2_CHGDETRUN:
case MAX14577_IRQ_INT2_DCDTMR:
@@ -558,8 +564,62 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
case MAX14577_IRQ_INT2_VBVOLT:
/* Handle charger accessory */
info->irq_chg = true;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Sets irq_adc or irq_chg in max14577_muic_info and returns 1.
+ * Returns 0 if irq_type does not match a registered IRQ for this device type.
+ */
+static int max77836_parse_irq(struct max14577_muic_info *info, int irq_type)
+{
+ /* First check common max14577 interrupts */
+ if (max14577_parse_irq(info, irq_type))
+ return 1;
+
+ switch (irq_type) {
+ case MAX77836_IRQ_INT1_ADC1K:
+ info->irq_adc = true;
+ return 1;
+ case MAX77836_IRQ_INT2_VIDRM:
+ /* Handle charger accessory */
+ info->irq_chg = true;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
+{
+ struct max14577_muic_info *info = data;
+ int i, irq_type = -1;
+ bool irq_parsed;
+
+ /*
+ * We may be called multiple times for different nested IRQs,
+ * including changes in INT1_ADC and INT2_CHGTYP at once.
+ * However, we only need to know whether it was ADC, charger
+ * or both interrupts, so decode the IRQ and turn on the proper flags.
+ */
+ for (i = 0; i < info->muic_irqs_num; i++)
+ if (irq == info->muic_irqs[i].virq)
+ irq_type = info->muic_irqs[i].irq;
+
+ switch (info->max14577->dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ irq_parsed = max77836_parse_irq(info, irq_type);
break;
+ case MAXIM_DEVICE_TYPE_MAX14577:
default:
+ irq_parsed = max14577_parse_irq(info, irq_type);
+ break;
+ }
+
+ if (!irq_parsed) {
dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n",
irq_type);
return IRQ_HANDLED;
@@ -644,13 +704,24 @@ static int max14577_muic_probe(struct platform_device *pdev)
INIT_WORK(&info->irq_work, max14577_muic_irq_work);
+ switch (max14577->dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ info->muic_irqs = max77836_muic_irqs;
+ info->muic_irqs_num = ARRAY_SIZE(max77836_muic_irqs);
+ break;
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ info->muic_irqs = max14577_muic_irqs;
+ info->muic_irqs_num = ARRAY_SIZE(max14577_muic_irqs);
+ }
+
/* Support irq domain for max14577 MUIC device */
- for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
- struct max14577_muic_irq *muic_irq = &muic_irqs[i];
+ for (i = 0; i < info->muic_irqs_num; i++) {
+ struct max14577_muic_irq *muic_irq = &info->muic_irqs[i];
unsigned int virq = 0;
virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
- if (!virq)
+ if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;
@@ -668,14 +739,16 @@ static int max14577_muic_probe(struct platform_device *pdev)
}
/* Initialize extcon device */
- info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL);
- if (!info->edev) {
+ info->edev = devm_extcon_dev_allocate(&pdev->dev,
+ max14577_extcon_cable);
+ if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
- info->edev->name = DEV_NAME;
- info->edev->supported_cable = max14577_extcon_cable;
- ret = extcon_dev_register(info->edev);
+
+ info->edev->name = dev_name(&pdev->dev);
+
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return ret;
@@ -694,7 +767,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
MAX14577_REG_DEVICEID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_extcon;
+ return ret;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
@@ -710,19 +783,10 @@ static int max14577_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
- ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
delay_jiffies);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to schedule delayed work for cable detect\n");
- goto err_extcon;
- }
return ret;
-
-err_extcon:
- extcon_dev_unregister(info->edev);
- return ret;
}
static int max14577_muic_remove(struct platform_device *pdev)
@@ -730,23 +794,30 @@ static int max14577_muic_remove(struct platform_device *pdev)
struct max14577_muic_info *info = platform_get_drvdata(pdev);
cancel_work_sync(&info->irq_work);
- extcon_dev_unregister(info->edev);
return 0;
}
+static const struct platform_device_id max14577_muic_id[] = {
+ { "max14577-muic", MAXIM_DEVICE_TYPE_MAX14577, },
+ { "max77836-muic", MAXIM_DEVICE_TYPE_MAX77836, },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max14577_muic_id);
+
static struct platform_driver max14577_muic_driver = {
.driver = {
- .name = DEV_NAME,
+ .name = "max14577-muic",
.owner = THIS_MODULE,
},
.probe = max14577_muic_probe,
.remove = max14577_muic_remove,
+ .id_table = max14577_muic_id,
};
module_platform_driver(max14577_muic_driver);
-MODULE_DESCRIPTION("MAXIM 14577 Extcon driver");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_DESCRIPTION("Maxim 14577/77836 Extcon driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:extcon-max14577");
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index da268fbc901b..2c7c3e191591 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1175,25 +1175,24 @@ static int max77693_muic_probe(struct platform_device *pdev)
}
/* Initialize extcon device */
- info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
- GFP_KERNEL);
- if (!info->edev) {
+ info->edev = devm_extcon_dev_allocate(&pdev->dev,
+ max77693_extcon_cable);
+ if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
goto err_irq;
}
info->edev->name = DEV_NAME;
info->edev->dev.parent = &pdev->dev;
- info->edev->supported_cable = max77693_extcon_cable;
- ret = extcon_dev_register(info->edev);
+
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
goto err_irq;
}
-
/* Initialize MUIC register by using platform data or default data */
- if (pdata->muic_data) {
+ if (pdata && pdata->muic_data) {
init_data = pdata->muic_data->init_data;
num_init_data = pdata->muic_data->num_init_data;
} else {
@@ -1226,7 +1225,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
= init_data[i].data;
}
- if (pdata->muic_data) {
+ if (pdata && pdata->muic_data) {
struct max77693_muic_platform_data *muic_pdata
= pdata->muic_data;
@@ -1267,7 +1266,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_extcon;
+ goto err_irq;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
@@ -1283,12 +1282,11 @@ static int max77693_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
- schedule_delayed_work(&info->wq_detcable, delay_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ delay_jiffies);
return ret;
-err_extcon:
- extcon_dev_unregister(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
@@ -1304,7 +1302,6 @@ static int max77693_muic_remove(struct platform_device *pdev)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
input_unregister_device(info->dock);
- extcon_dev_unregister(info->edev);
return 0;
}
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 6a00464658c5..d9f7f1baaa03 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -699,23 +699,22 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
/* External connector */
- info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
- GFP_KERNEL);
- if (!info->edev) {
+ info->edev = devm_extcon_dev_allocate(&pdev->dev, max8997_extcon_cable);
+ if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
goto err_irq;
}
info->edev->name = DEV_NAME;
info->edev->dev.parent = &pdev->dev;
- info->edev->supported_cable = max8997_extcon_cable;
- ret = extcon_dev_register(info->edev);
+
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
goto err_irq;
}
- if (pdata->muic_pdata) {
+ if (pdata && pdata->muic_pdata) {
struct max8997_muic_platform_data *muic_pdata
= pdata->muic_pdata;
@@ -770,7 +769,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
- schedule_delayed_work(&info->wq_detcable, delay_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ delay_jiffies);
return 0;
@@ -789,8 +789,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
- extcon_dev_unregister(info->edev);
-
return 0;
}
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index ddff2b72f0a8..7417ce84eb2d 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/err.h>
#include <linux/mfd/palmas.h>
#include <linux/of.h>
@@ -56,7 +57,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) {
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
- extcon_set_cable_state(&palmas_usb->edev, "USB", true);
+ extcon_set_cable_state(palmas_usb->edev, "USB", true);
dev_info(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -65,7 +66,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
} else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) {
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(&palmas_usb->edev, "USB", false);
+ extcon_set_cable_state(palmas_usb->edev, "USB", false);
dev_info(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -92,7 +93,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+ extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
@@ -100,17 +101,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
+ extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
(!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
+ extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+ extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true);
dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
}
@@ -186,13 +187,20 @@ static int palmas_usb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, palmas_usb);
- palmas_usb->edev.supported_cable = palmas_extcon_cable;
- palmas_usb->edev.dev.parent = palmas_usb->dev;
- palmas_usb->edev.mutually_exclusive = mutually_exclusive;
+ palmas_usb->edev = devm_extcon_dev_allocate(&pdev->dev,
+ palmas_extcon_cable);
+ if (IS_ERR(palmas_usb->edev)) {
+ dev_err(&pdev->dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+ palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL);
+ palmas_usb->edev->dev.parent = palmas_usb->dev;
+ palmas_usb->edev->mutually_exclusive = mutually_exclusive;
- status = extcon_dev_register(&palmas_usb->edev);
+ status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
+ kfree(palmas_usb->edev->name);
return status;
}
@@ -206,7 +214,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
- goto fail_extcon;
+ kfree(palmas_usb->edev->name);
+ return status;
}
}
@@ -220,25 +229,21 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
- goto fail_extcon;
+ kfree(palmas_usb->edev->name);
+ return status;
}
}
palmas_enable_irq(palmas_usb);
device_set_wakeup_capable(&pdev->dev, true);
return 0;
-
-fail_extcon:
- extcon_dev_unregister(&palmas_usb->edev);
-
- return status;
}
static int palmas_usb_remove(struct platform_device *pdev)
{
struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
- extcon_dev_unregister(&palmas_usb->edev);
+ kfree(palmas_usb->edev->name);
return 0;
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define LOCAL_BUS 0xffc0
-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE (128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
version >> 16, version & 0xff, ohci->card.index,
ohci->n_ir, ohci->n_it, ohci->quirks,
reg_read(ohci, OHCI1394_PhyUpperBound) ?
- ", >4 GB phys DMA" : "");
+ ", physUB" : "");
return 0;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index a86c49a605c6..4a1b5113e527 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -56,6 +56,7 @@ config GPIO_ACPI
depends on ACPI
config GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN
bool
config DEBUG_GPIO
@@ -243,6 +244,15 @@ config GPIO_OCTEON
Say yes here to support the on-chip GPIO lines on the OCTEON
family of SOCs.
+config GPIO_OMAP
+ bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS
+ default y if ARCH_OMAP
+ depends on ARM
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to enable GPIO support for TI OMAP SoCs.
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -259,7 +269,7 @@ config GPIO_PXA
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
- depends on ARM
+ depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST)
help
Say yes here to support GPIO on Renesas R-Car SoCs.
@@ -510,6 +520,7 @@ config GPIO_PCA953X
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
depends on GPIO_PCA953X=y
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable the pca953x to be used as an interrupt
controller. It requires the driver to be built in the kernel.
@@ -579,6 +590,7 @@ config GPIO_STP_XWAY
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
+ select GPIOLIB_IRQCHIP
help
This enables support for the GPIOs found on the TC3589X
I/O Expander.
@@ -699,13 +711,13 @@ config GPIO_AMD8111
config GPIO_INTEL_MID
bool "Intel Mid GPIO support"
depends on PCI && X86
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say Y here to support Intel Mid GPIO.
config GPIO_PCH
tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
- depends on PCI && X86
+ depends on PCI && (X86_32 || COMPILE_TEST)
select GENERIC_IRQ_CHIP
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
@@ -739,7 +751,7 @@ config GPIO_SODAVILLE
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
- depends on MFD_TIMBERDALE && HAS_IOMEM
+ depends on MFD_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 6309aff1d806..d10f6a9d875a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -58,7 +58,7 @@ obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
-obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
+obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 307464fd015f..65978cf85f79 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -52,6 +52,22 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get);
/**
+ * devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ *
+ * Managed gpiod_get_optional(). GPIO descriptors returned from this function
+ * are automatically disposed on driver detach. See gpiod_get_optional() for
+ * detailed information about behavior and return values.
+ */
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
+ const char *con_id)
+{
+ return devm_gpiod_get_index_optional(dev, con_id, 0);
+}
+EXPORT_SYMBOL(devm_gpiod_get_optional);
+
+/**
* devm_gpiod_get_index - Resource-managed gpiod_get_index()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
@@ -87,6 +103,33 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get_index);
/**
+ * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ * @index: index of the GPIO to obtain in the consumer
+ *
+ * Managed gpiod_get_index_optional(). GPIO descriptors returned from this
+ * function are automatically disposed on driver detach. See
+ * gpiod_get_index_optional() for detailed information about behavior and
+ * return values.
+ */
+struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
+ const char *con_id,
+ unsigned int index)
+{
+ struct gpio_desc *desc;
+
+ desc = devm_gpiod_get_index(dev, con_id, index);
+ if (IS_ERR(desc)) {
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_index_optional);
+
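A consumer of the new _optional variants treats an absent GPIO as a valid configuration rather than a probe failure. A minimal sketch (the "reset" line name is an assumption for illustration):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* NULL, not an error, when no "reset" GPIO is described for dev */
	reset = devm_gpiod_get_optional(dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset)
		gpiod_direction_output(reset, 0);	/* hold in reset */

	return 0;
}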
+/**
* devm_gpiod_put - Resource-managed gpiod_put()
* @desc: GPIO descriptor to dispose of
*
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 613265944e2e..f1ade8fa3218 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -106,10 +106,8 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
}
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&pdev->dev, "failed to alloc memory\n");
+ if (dev == NULL)
return -ENOMEM;
- }
dev->master = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d974020b78bb..ef19bc33f2bd 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -379,10 +379,8 @@ static int adp5588_gpio_probe(struct i2c_client *client,
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&client->dev, "failed to alloc memory\n");
+ if (dev == NULL)
return -ENOMEM;
- }
dev->client = client;
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index ecb3ca2d1d10..6557147d9331 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -178,7 +178,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
struct bt8xxgpio *bg;
int err;
- bg = kzalloc(sizeof(*bg), GFP_KERNEL);
+ bg = devm_kzalloc(&dev->dev, sizeof(struct bt8xxgpio), GFP_KERNEL);
if (!bg)
return -ENOMEM;
@@ -188,9 +188,9 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
err = pci_enable_device(dev);
if (err) {
printk(KERN_ERR "bt8xxgpio: Can't enable device.\n");
- goto err_freebg;
+ return err;
}
- if (!request_mem_region(pci_resource_start(dev, 0),
+ if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0),
pci_resource_len(dev, 0),
"bt8xxgpio")) {
printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n",
@@ -201,11 +201,11 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
pci_set_master(dev);
pci_set_drvdata(dev, bg);
- bg->mmio = ioremap(pci_resource_start(dev, 0), 0x1000);
+ bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000);
if (!bg->mmio) {
printk(KERN_ERR "bt8xxgpio: ioremap() failed\n");
err = -EIO;
- goto err_release_mem;
+ goto err_disable;
}
/* Disable interrupts */
@@ -220,18 +220,13 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
err = gpiochip_add(&bg->gpio);
if (err) {
printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n");
- goto err_release_mem;
+ goto err_disable;
}
return 0;
-err_release_mem:
- release_mem_region(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
err_disable:
pci_disable_device(dev);
-err_freebg:
- kfree(bg);
return err;
}
@@ -250,8 +245,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
-
- kfree(bg);
}
#ifdef CONFIG_PM
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 339f9dac591b..9f0682534e2f 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -230,10 +230,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips = devm_kzalloc(dev,
ngpio * sizeof(struct davinci_gpio_controller),
GFP_KERNEL);
- if (!chips) {
- dev_err(dev, "Memory allocation failed\n");
+ if (!chips)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index ed5711f77e2d..cd3b81435274 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -198,6 +198,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
break;
}
+ irq_setup_alt_chip(d, type);
+
writel(level, gpio->regs + GPIO_INTTYPE_LEVEL);
writel(polarity, gpio->regs + GPIO_INT_POLARITY);
spin_unlock_irqrestore(&bgc->lock, flags);
@@ -213,7 +215,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct irq_chip_generic *irq_gc;
unsigned int hwirq, ngpio = gc->ngpio;
struct irq_chip_type *ct;
- int err, irq;
+ int err, irq, i;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
@@ -227,7 +229,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
if (!gpio->domain)
return;
- err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 1,
+ err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
"gpio-dwapb", handle_level_irq,
IRQ_NOREQUEST, 0,
IRQ_GC_INIT_NESTED_LOCK);
@@ -248,20 +250,24 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
irq_gc->reg_base = gpio->regs;
irq_gc->private = gpio;
- ct = irq_gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = dwapb_irq_set_type;
- ct->chip.irq_enable = dwapb_irq_enable;
- ct->chip.irq_disable = dwapb_irq_disable;
- ct->chip.irq_request_resources = dwapb_irq_reqres;
- ct->chip.irq_release_resources = dwapb_irq_relres;
- ct->regs.ack = GPIO_PORTA_EOI;
- ct->regs.mask = GPIO_INTMASK;
-
- irq_setup_generic_chip(irq_gc, IRQ_MSK(port->bgc.gc.ngpio),
- IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
+ for (i = 0; i < 2; i++) {
+ ct = &irq_gc->chip_types[i];
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = dwapb_irq_set_type;
+ ct->chip.irq_enable = dwapb_irq_enable;
+ ct->chip.irq_disable = dwapb_irq_disable;
+ ct->chip.irq_request_resources = dwapb_irq_reqres;
+ ct->chip.irq_release_resources = dwapb_irq_relres;
+ ct->regs.ack = GPIO_PORTA_EOI;
+ ct->regs.mask = GPIO_INTMASK;
+ ct->type = IRQ_TYPE_LEVEL_MASK;
+ }
+
+ irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ irq_gc->chip_types[1].handler = handle_edge_irq;
irq_set_chained_handler(irq, dwapb_irq_handler);
irq_set_handler_data(irq, gpio);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 8765bd6f48e1..cde36054c387 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -212,7 +212,7 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg,
{
/* upper 16 bits contains mask and lower 16 actual value */
em_gio_write(gpio_to_priv(chip), reg,
- (1 << (shift + 16)) | (value << shift));
+ (BIT(shift + 16)) | (value << shift));
}
static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -284,7 +284,6 @@ static int em_gio_probe(struct platform_device *pdev)
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
ret = -ENOMEM;
goto err0;
}
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 80829f3c6543..dcc2bb4074ef 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -344,37 +344,24 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
{
struct ep93xx_gpio *ep93xx_gpio;
struct resource *res;
- void __iomem *mmio;
int i;
- int ret;
+ struct device *dev = &pdev->dev;
- ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL);
+ ep93xx_gpio = devm_kzalloc(dev, sizeof(struct ep93xx_gpio), GFP_KERNEL);
if (!ep93xx_gpio)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto exit_free;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- ret = -EBUSY;
- goto exit_free;
- }
-
- mmio = ioremap(res->start, resource_size(res));
- if (!mmio) {
- ret = -ENXIO;
- goto exit_release;
- }
- ep93xx_gpio->mmio_base = mmio;
+ ep93xx_gpio->mmio_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ep93xx_gpio->mmio_base))
+ return PTR_ERR(ep93xx_gpio->mmio_base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
- if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank))
+ if (ep93xx_gpio_add_bank(bgc, &pdev->dev,
+ ep93xx_gpio->mmio_base, bank))
dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
bank->label);
}
@@ -382,13 +369,6 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
ep93xx_gpio_init_irq();
return 0;
-
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_free:
- kfree(ep93xx_gpio);
- dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret);
- return ret;
}
static struct platform_driver ep93xx_gpio_driver = {
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 7b95a4a8318c..1237a73c3c91 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -18,15 +18,9 @@
*/
#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <linux/init.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/of_gpio.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
#include <linux/module.h>
#define GEF_GPIO_DIRECT 0x00
@@ -39,28 +33,26 @@
#define GEF_GPIO_OVERRUN 0x1C
#define GEF_GPIO_MODE 0x20
-static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
+static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
unsigned int data;
- data = ioread32be(reg);
- /* value: 0=low; 1=high */
- if (value & 0x1)
- data = data | (0x1 << offset);
+ data = ioread32be(mmchip->regs + GEF_GPIO_OUT);
+ if (value)
+ data = data | BIT(offset);
else
- data = data & ~(0x1 << offset);
-
- iowrite32be(data, reg);
+ data = data & ~BIT(offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_OUT);
}
-
static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
{
unsigned int data;
struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data | (0x1 << offset);
+ data = data | BIT(offset);
iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
return 0;
@@ -71,11 +63,11 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
unsigned int data;
struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
- /* Set direction before switching to input */
- _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+ /* Set value before switching to output */
+ gef_gpio_set(chip, offset, value);
data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data & ~(0x1 << offset);
+ data = data & ~BIT(offset);
iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
return 0;
@@ -83,116 +75,56 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- unsigned int data;
- int state = 0;
struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
- data = ioread32be(mmchip->regs + GEF_GPIO_IN);
- state = (int)((data >> offset) & 0x1);
-
- return state;
+ return !!(ioread32be(mmchip->regs + GEF_GPIO_IN) & BIT(offset));
}
-static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
-}
+static const struct of_device_id gef_gpio_ids[] = {
+ {
+ .compatible = "gef,sbc610-gpio",
+ .data = (void *)19,
+ }, {
+ .compatible = "gef,sbc310-gpio",
+ .data = (void *)6,
+ }, {
+ .compatible = "ge,imp3a-gpio",
+ .data = (void *)16,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gef_gpio_ids);
-static int __init gef_gpio_init(void)
+static int __init gef_gpio_probe(struct platform_device *pdev)
{
- struct device_node *np;
- int retval;
- struct of_mm_gpio_chip *gef_gpio_chip;
-
- for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
-
- pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
-
- /* Allocate chip structure */
- gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
- if (!gef_gpio_chip) {
- pr_err("%s: Unable to allocate structure\n",
- np->full_name);
- continue;
- }
-
- /* Setup pointers to chip functions */
- gef_gpio_chip->gc.of_gpio_n_cells = 2;
- gef_gpio_chip->gc.ngpio = 19;
- gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
- gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
- gef_gpio_chip->gc.get = gef_gpio_get;
- gef_gpio_chip->gc.set = gef_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = of_mm_gpiochip_add(np, gef_gpio_chip);
- if (retval) {
- kfree(gef_gpio_chip);
- pr_err("%s: Unable to add GPIO\n", np->full_name);
- }
- }
-
- for_each_compatible_node(np, NULL, "gef,sbc310-gpio") {
-
- pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
-
- /* Allocate chip structure */
- gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
- if (!gef_gpio_chip) {
- pr_err("%s: Unable to allocate structure\n",
- np->full_name);
- continue;
- }
-
- /* Setup pointers to chip functions */
- gef_gpio_chip->gc.of_gpio_n_cells = 2;
- gef_gpio_chip->gc.ngpio = 6;
- gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
- gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
- gef_gpio_chip->gc.get = gef_gpio_get;
- gef_gpio_chip->gc.set = gef_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = of_mm_gpiochip_add(np, gef_gpio_chip);
- if (retval) {
- kfree(gef_gpio_chip);
- pr_err("%s: Unable to add GPIO\n", np->full_name);
- }
- }
-
- for_each_compatible_node(np, NULL, "ge,imp3a-gpio") {
-
- pr_debug("%s: Initialising GE GPIO\n", np->full_name);
-
- /* Allocate chip structure */
- gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
- if (!gef_gpio_chip) {
- pr_err("%s: Unable to allocate structure\n",
- np->full_name);
- continue;
- }
-
- /* Setup pointers to chip functions */
- gef_gpio_chip->gc.of_gpio_n_cells = 2;
- gef_gpio_chip->gc.ngpio = 16;
- gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
- gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
- gef_gpio_chip->gc.get = gef_gpio_get;
- gef_gpio_chip->gc.set = gef_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = of_mm_gpiochip_add(np, gef_gpio_chip);
- if (retval) {
- kfree(gef_gpio_chip);
- pr_err("%s: Unable to add GPIO\n", np->full_name);
- }
- }
+ const struct of_device_id *of_id =
+ of_match_device(gef_gpio_ids, &pdev->dev);
+ struct of_mm_gpio_chip *mmchip;
+
+ mmchip = devm_kzalloc(&pdev->dev, sizeof(*mmchip), GFP_KERNEL);
+ if (!mmchip)
+ return -ENOMEM;
+
+ /* Setup pointers to chip functions */
+ mmchip->gc.ngpio = (u16)(uintptr_t)of_id->data;
+ mmchip->gc.of_gpio_n_cells = 2;
+ mmchip->gc.direction_input = gef_gpio_dir_in;
+ mmchip->gc.direction_output = gef_gpio_dir_out;
+ mmchip->gc.get = gef_gpio_get;
+ mmchip->gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ return of_mm_gpiochip_add(pdev->dev.of_node, mmchip);
+};
- return 0;
+static struct platform_driver gef_gpio_driver = {
+ .driver = {
+ .name = "gef-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = gef_gpio_ids,
+ },
};
-arch_initcall(gef_gpio_init);
+module_platform_driver_probe(gef_gpio_driver, gef_gpio_probe);
MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
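The three near-identical for_each_compatible_node() loops removed above differed only in the GPIO line count, which is exactly what the new match table encodes: each of_device_id entry smuggles its count through the .data pointer, and the single probe casts it back out. A minimal sketch of that idiom, assuming a hypothetical my_ids[] table and my_probe():

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical match table: .data carries a small integer, not a pointer. */
static const struct of_device_id my_ids[] = {
	{ .compatible = "vendor,board-a", .data = (void *)19 },
	{ .compatible = "vendor,board-b", .data = (void *)6 },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id = of_match_device(my_ids, &pdev->dev);

	if (!of_id)
		return -ENODEV;		/* probed without a DT match */

	/* Cast back through uintptr_t to recover the stashed integer. */
	dev_info(&pdev->dev, "ngpio = %d\n", (int)(uintptr_t)of_id->data);
	return 0;
}

module_platform_driver_probe() then registers the driver with this one-shot probe, replacing the old arch_initcall().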
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index b5dff9e742f8..fea8c82bb8fc 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -388,6 +388,14 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
return 0;
}
+static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+{
+ if (gpio_pin < chip->ngpio)
+ return 0;
+
+ return -EINVAL;
+}
+
int bgpio_remove(struct bgpio_chip *bgc)
{
return gpiochip_remove(&bgc->gc);
@@ -413,6 +421,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
bgc->gc.label = dev_name(dev);
bgc->gc.base = -1;
bgc->gc.ngpio = bgc->bits;
+ bgc->gc.request = bgpio_request;
ret = bgpio_setup_io(bgc, dat, set, clr);
if (ret)
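The new bgpio_request() hook above is a plain bounds check; it matters because gpiolib invokes a chip's .request callback (when one is set) before a consumer is handed the line, so an out-of-range offset is refused up front. A rough caller-side sketch of that contract, with a hypothetical try_claim() helper:

#include <linux/gpio.h>
#include <linux/printk.h>

/* Hypothetical wrapper showing when the .request hook runs. */
static int try_claim(struct gpio_chip *chip, unsigned pin)
{
	int ret = 0;

	if (chip->request)
		ret = chip->request(chip, pin);	/* -EINVAL if pin >= ngpio */
	if (ret)
		pr_err("pin %u refused: %d\n", pin, ret);
	return ret;
}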
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 84d2478ec294..3c3f515b7916 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -481,7 +481,7 @@ out:
return ret;
}
-static struct of_device_id grgpio_match[] = {
+static const struct of_device_id grgpio_match[] = {
{.name = "GAISLER_GPIO"},
{.name = "01_01a"},
{},
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 2ecd3a09c743..42852eaaf020 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -152,34 +152,21 @@ static int ttl_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(dev, "no platform data\n");
- ret = -ENXIO;
- goto out_return;
+ return -ENXIO;
}
- mod = kzalloc(sizeof(*mod), GFP_KERNEL);
- if (!mod) {
- dev_err(dev, "unable to allocate private data\n");
- ret = -ENOMEM;
- goto out_return;
- }
+ mod = devm_kzalloc(dev, sizeof(*mod), GFP_KERNEL);
+ if (!mod)
+ return -ENOMEM;
platform_set_drvdata(pdev, mod);
spin_lock_init(&mod->lock);
/* get access to the MODULbus registers for this module */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "MODULbus registers not found\n");
- ret = -ENODEV;
- goto out_free_mod;
- }
-
- mod->regs = ioremap(res->start, resource_size(res));
- if (!mod->regs) {
- dev_err(dev, "MODULbus registers not ioremap\n");
- ret = -ENOMEM;
- goto out_free_mod;
- }
+ mod->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mod->regs))
+ return PTR_ERR(mod->regs);
ttl_setup_device(mod);
@@ -198,17 +185,10 @@ static int ttl_probe(struct platform_device *pdev)
ret = gpiochip_add(gpio);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
- goto out_iounmap_regs;
+ return ret;
}
return 0;
-
-out_iounmap_regs:
- iounmap(mod->regs);
-out_free_mod:
- kfree(mod);
-out_return:
- return ret;
}
static int ttl_remove(struct platform_device *pdev)
@@ -223,8 +203,6 @@ static int ttl_remove(struct platform_device *pdev)
return ret;
}
- iounmap(mod->regs);
- kfree(mod);
return 0;
}
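The janz-ttl rework is the canonical devm conversion: device-managed allocations and mappings are torn down automatically when probe fails or the device unbinds, which is why every error label and the remove-side iounmap()/kfree() pair can simply disappear. A minimal sketch of the resulting probe shape, with hypothetical my_priv/my_probe names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
	void __iomem *regs;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;
	struct resource *res;

	/* Freed automatically on probe failure or device removal. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Requests the region and maps it; both are device-managed. */
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	platform_set_drvdata(pdev, priv);
	return 0;
}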
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index c6d88173f5a2..1e5e51987d31 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -24,7 +24,7 @@
#include <linux/mfd/kempld.h>
#define KEMPLD_GPIO_MAX_NUM 16
-#define KEMPLD_GPIO_MASK(x) (1 << ((x) % 8))
+#define KEMPLD_GPIO_MASK(x) (BIT((x) % 8))
#define KEMPLD_GPIO_DIR_NUM(x) (0x40 + (x) / 8)
#define KEMPLD_GPIO_LVL_NUM(x) (0x42 + (x) / 8)
#define KEMPLD_GPIO_EVT_LVL_EDGE 0x46
@@ -216,4 +216,4 @@ module_platform_driver(kempld_gpio_driver);
MODULE_DESCRIPTION("KEM PLD GPIO Driver");
MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-kempld");
+MODULE_ALIAS("platform:kempld-gpio");
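Several drivers in this series also replace open-coded (1 << n) masks with BIT(n), as in KEMPLD_GPIO_MASK() above. The kernel defines BIT(nr) as (1UL << (nr)) in <linux/bitops.h>; the result is the same for small n, but the macro states intent and, being unsigned long, avoids shifting into the sign bit of an int. A self-contained illustration:

#include <stdio.h>

/* Same definition as the kernel's <linux/bitops.h>. */
#define BIT(nr) (1UL << (nr))

int main(void)
{
	unsigned long mask = 0;

	mask |= BIT(3);			/* set bit 3 */
	mask &= ~BIT(3);		/* clear it again */
	printf("BIT(5) = 0x%lx, mask = 0x%lx\n", BIT(5), mask);
	return 0;
}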
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 9a82a9074a2c..2bea89b72508 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -375,10 +375,8 @@ static int lp_gpio_probe(struct platform_device *pdev)
int ret = -ENODEV;
lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
- if (!lg) {
- dev_err(dev, "can't allocate lp_gpio chip data\n");
+ if (!lg)
return -ENOMEM;
- }
lg->pdev = pdev;
platform_set_drvdata(pdev, lg);
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 8672755f95c9..0814584fcdc1 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -237,10 +237,9 @@ int __max730x_remove(struct device *dev)
ts->write(dev, 0x04, 0x00);
ret = gpiochip_remove(&ts->chip);
- if (!ret) {
+ if (!ret)
mutex_destroy(&ts->lock);
- kfree(ts);
- } else
+ else
dev_err(dev, "Failed to remove GPIO controller: %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 3d53fd6880d1..fe7c0e211f9a 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -714,7 +714,7 @@ fail:
#ifdef CONFIG_OF
#ifdef CONFIG_SPI_MASTER
-static struct of_device_id mcp23s08_spi_of_match[] = {
+static const struct of_device_id mcp23s08_spi_of_match[] = {
{
.compatible = "microchip,mcp23s08",
.data = (void *) MCP_TYPE_S08,
@@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
#endif
#if IS_ENABLED(CONFIG_I2C)
-static struct of_device_id mcp23s08_i2c_of_match[] = {
+static const struct of_device_id mcp23s08_i2c_of_match[] = {
{
.compatible = "microchip,mcp23008",
.data = (void *) MCP_TYPE_008,
@@ -867,7 +867,7 @@ static int mcp23s08_probe(struct spi_device *spi)
{
struct mcp23s08_platform_data *pdata;
unsigned addr;
- unsigned chips = 0;
+ int chips = 0;
struct mcp23s08_driver_data *data;
int status, type;
unsigned base = -1,
@@ -894,11 +894,14 @@ static int mcp23s08_probe(struct spi_device *spi)
dev_err(&spi->dev, "invalid spi-present-mask\n");
return -ENODEV;
}
+
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- if ((spi_present_mask & (1 << addr)))
- chips++;
pullups[addr] = 0;
+ if (spi_present_mask & (1 << addr))
+ chips++;
}
+ if (!chips)
+ return -ENODEV;
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
@@ -937,6 +940,10 @@ static int mcp23s08_probe(struct spi_device *spi)
if (!(spi_present_mask & (1 << addr)))
continue;
chips--;
+ if (chips < 0) {
+ dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
+ goto fail;
+ }
data->mcp[addr] = &data->chip[chips];
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, base,
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index ccd45704e5fd..4661e181be04 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -113,10 +113,8 @@ static int moxart_gpio_probe(struct platform_device *pdev)
int ret;
mgc = devm_kzalloc(dev, sizeof(*mgc), GFP_KERNEL);
- if (!mgc) {
- dev_err(dev, "can't allocate GPIO chip container\n");
+ if (!mgc)
return -ENOMEM;
- }
mgc->gpio = moxart_template_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d42509422394..418e38650363 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -535,7 +535,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define mvebu_gpio_dbg_show NULL
#endif
-static struct of_device_id mvebu_gpio_of_match[] = {
+static const struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
.data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
@@ -574,10 +574,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL);
- if (!mvchip) {
- dev_err(&pdev->dev, "Cannot allocate memory\n");
+ if (!mvchip)
return -ENOMEM;
- }
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
dev_err(&pdev->dev, "Missing ngpios OF property\n");
@@ -738,9 +736,4 @@ static struct platform_driver mvebu_gpio_driver = {
},
.probe = mvebu_gpio_probe,
};
-
-static int __init mvebu_gpio_init(void)
-{
- return platform_driver_register(&mvebu_gpio_driver);
-}
-postcore_initcall(mvebu_gpio_init);
+module_platform_driver(mvebu_gpio_driver);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 19b886c21b1d..00f29aa1fb9d 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -24,9 +24,9 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
+#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
#define OFF_MODE 1
@@ -52,7 +52,6 @@ struct gpio_bank {
struct list_head node;
void __iomem *base;
u16 irq;
- struct irq_domain *domain;
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
@@ -84,22 +83,21 @@ struct gpio_bank {
};
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
-#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
+#define GPIO_BIT(bank, gpio) (BIT(GPIO_INDEX(bank, gpio)))
#define GPIO_MOD_CTRL_BIT BIT(0)
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
-#define LINE_USED(line, offset) (line & (1 << offset))
+#define LINE_USED(line, offset) (line & (BIT(offset)))
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
return bank->chip.base + gpio_irq;
}
-static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static inline struct gpio_bank *_irq_data_get_bank(struct irq_data *d)
{
- struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
-
- return irq_find_mapping(bank->domain, offset);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ return container_of(chip, struct gpio_bank, chip);
}
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
@@ -110,9 +108,9 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
reg += bank->regs->direction;
l = readl_relaxed(reg);
if (is_input)
- l |= 1 << gpio;
+ l |= BIT(gpio);
else
- l &= ~(1 << gpio);
+ l &= ~(BIT(gpio));
writel_relaxed(l, reg);
bank->context.oe = l;
}
@@ -155,14 +153,14 @@ static int _get_gpio_datain(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->datain;
- return (readl_relaxed(reg) & (1 << offset)) != 0;
+ return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->dataout;
- return (readl_relaxed(reg) & (1 << offset)) != 0;
+ return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
@@ -180,7 +178,7 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
- clk_enable(bank->dbck);
+ clk_prepare_enable(bank->dbck);
bank->dbck_enabled = true;
writel_relaxed(bank->dbck_enable_mask,
@@ -198,7 +196,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank)
*/
writel_relaxed(0, bank->base + bank->regs->debounce_en);
- clk_disable(bank->dbck);
+ clk_disable_unprepare(bank->dbck);
bank->dbck_enabled = false;
}
}
@@ -231,7 +229,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
l = GPIO_BIT(bank, gpio);
- clk_enable(bank->dbck);
+ clk_prepare_enable(bank->dbck);
reg = bank->base + bank->regs->debounce;
writel_relaxed(debounce, reg);
@@ -245,7 +243,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
bank->dbck_enable_mask = val;
writel_relaxed(val, reg);
- clk_disable(bank->dbck);
+ clk_disable_unprepare(bank->dbck);
/*
* Enable debounce clock per module.
* This call is mandatory because in omap_gpio_request() when
@@ -290,7 +288,7 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
bank->context.debounce = 0;
writel_relaxed(bank->context.debounce, bank->base +
bank->regs->debounce);
- clk_disable(bank->dbck);
+ clk_disable_unprepare(bank->dbck);
bank->dbck_enabled = false;
}
}
@@ -299,7 +297,7 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
void __iomem *base = bank->base;
- u32 gpio_bit = 1 << gpio;
+ u32 gpio_bit = BIT(gpio);
_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
trigger & IRQ_TYPE_LEVEL_LOW);
@@ -368,9 +366,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
l = readl_relaxed(reg);
if ((l >> gpio) & 1)
- l &= ~(1 << gpio);
+ l &= ~(BIT(gpio));
else
- l |= 1 << gpio;
+ l |= BIT(gpio);
writel_relaxed(l, reg);
}
@@ -392,11 +390,11 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
l = readl_relaxed(reg);
if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
- bank->toggle_mask |= 1 << gpio;
+ bank->toggle_mask |= BIT(gpio);
if (trigger & IRQ_TYPE_EDGE_RISING)
- l |= 1 << gpio;
+ l |= BIT(gpio);
else if (trigger & IRQ_TYPE_EDGE_FALLING)
- l &= ~(1 << gpio);
+ l &= ~(BIT(gpio));
else
return -EINVAL;
@@ -413,10 +411,10 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 2 << (gpio << 1);
if (trigger & IRQ_TYPE_EDGE_FALLING)
- l |= 1 << (gpio << 1);
+ l |= BIT(gpio << 1);
/* Enable wake-up during idle for dynamic tick */
- _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
+ _gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
bank->context.wake_en =
readl_relaxed(bank->base + bank->regs->wkup_en);
writel_relaxed(l, reg);
@@ -430,7 +428,7 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
void __iomem *reg = bank->base + bank->regs->pinctrl;
/* Claim the pin for MPU */
- writel_relaxed(readl_relaxed(reg) | (1 << offset), reg);
+ writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
}
if (bank->regs->ctrl && !BANK_USED(bank)) {
@@ -453,7 +451,7 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
!LINE_USED(bank->mod_usage, offset) &&
!LINE_USED(bank->irq_usage, offset)) {
/* Disable wake-up during idle for dynamic tick */
- _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+ _gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
bank->context.wake_en =
readl_relaxed(bank->base + bank->regs->wkup_en);
}
@@ -479,7 +477,7 @@ static int gpio_is_input(struct gpio_bank *bank, int mask)
static int gpio_irq_type(struct irq_data *d, unsigned type)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned gpio = 0;
int retval;
unsigned long flags;
@@ -509,20 +507,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
if (!LINE_USED(bank->mod_usage, offset)) {
_enable_gpio_module(bank, offset);
_set_gpio_direction(bank, offset, 1);
- } else if (!gpio_is_input(bank, 1 << offset)) {
+ } else if (!gpio_is_input(bank, BIT(offset))) {
spin_unlock_irqrestore(&bank->lock, flags);
return -EINVAL;
}
- retval = gpio_lock_as_irq(&bank->chip, offset);
- if (retval) {
- dev_err(bank->dev, "unable to lock offset %d for IRQ\n",
- offset);
- spin_unlock_irqrestore(&bank->lock, flags);
- return retval;
- }
-
- bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
+ bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -559,7 +549,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
void __iomem *reg = bank->base;
u32 l;
- u32 mask = (1 << bank->width) - 1;
+ u32 mask = (BIT(bank->width)) - 1;
reg += bank->regs->irqenable;
l = readl_relaxed(reg);
@@ -664,7 +654,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio)
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned int gpio = irq_to_gpio(bank, d->hwirq);
return _set_gpio_wakeup(bank, gpio, enable);
@@ -691,7 +681,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
_enable_gpio_module(bank, offset);
}
- bank->mod_usage |= 1 << offset;
+ bank->mod_usage |= BIT(offset);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -703,7 +693,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- bank->mod_usage &= ~(1 << offset);
+ bank->mod_usage &= ~(BIT(offset));
_disable_gpio_module(bank, offset);
_reset_gpio(bank, bank->chip.base + offset);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -732,11 +722,12 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
unsigned int bit;
struct gpio_bank *bank;
int unmasked = 0;
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct gpio_chip *chip = irq_get_handler_data(irq);
- chained_irq_enter(chip, desc);
+ chained_irq_enter(irqchip, desc);
- bank = irq_get_handler_data(irq);
+ bank = container_of(chip, struct gpio_bank, chip);
isr_reg = bank->base + bank->regs->irqstatus;
pm_runtime_get_sync(bank->dev);
@@ -764,7 +755,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
configured, we could unmask GPIO bank interrupt immediately */
if (!level_mask && !unmasked) {
unmasked = 1;
- chained_irq_exit(chip, desc);
+ chained_irq_exit(irqchip, desc);
}
if (!isr)
@@ -772,7 +763,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
while (isr) {
bit = __ffs(isr);
- isr &= ~(1 << bit);
+ isr &= ~(BIT(bit));
/*
* Some chips can't respond to both rising and falling
@@ -781,10 +772,11 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
* to respond to the IRQ for the opposite direction.
* This will be indicated in the bank toggle_mask.
*/
- if (bank->toggle_mask & (1 << bit))
+ if (bank->toggle_mask & (BIT(bit)))
_toggle_gpio_edge_triggering(bank, bit);
- generic_handle_irq(irq_find_mapping(bank->domain, bit));
+ generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
+ bit));
}
}
/* if bank has any level sensitive GPIO pin interrupt
@@ -793,20 +785,20 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
interrupt */
exit:
if (!unmasked)
- chained_irq_exit(chip, desc);
+ chained_irq_exit(irqchip, desc);
pm_runtime_put(bank->dev);
}
static void gpio_irq_shutdown(struct irq_data *d)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned long flags;
unsigned offset = GPIO_INDEX(bank, gpio);
spin_lock_irqsave(&bank->lock, flags);
gpio_unlock_as_irq(&bank->chip, offset);
- bank->irq_usage &= ~(1 << offset);
+ bank->irq_usage &= ~(BIT(offset));
_disable_gpio_module(bank, offset);
_reset_gpio(bank, gpio);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -821,7 +813,7 @@ static void gpio_irq_shutdown(struct irq_data *d)
static void gpio_ack_irq(struct irq_data *d)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned int gpio = irq_to_gpio(bank, d->hwirq);
_clear_gpio_irqstatus(bank, gpio);
@@ -829,7 +821,7 @@ static void gpio_ack_irq(struct irq_data *d)
static void gpio_mask_irq(struct irq_data *d)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned long flags;
@@ -841,7 +833,7 @@ static void gpio_mask_irq(struct irq_data *d)
static void gpio_unmask_irq(struct irq_data *d)
{
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_bank *bank = _irq_data_get_bank(d);
unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
@@ -936,6 +928,21 @@ static inline void mpuio_init(struct gpio_bank *bank)
/*---------------------------------------------------------------------*/
+static int gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+ void __iomem *reg;
+ int dir;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ reg = bank->base + bank->regs->direction;
+ spin_lock_irqsave(&bank->lock, flags);
+ dir = !!(readl_relaxed(reg) & BIT(offset));
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return dir;
+}
+
static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
@@ -954,7 +961,7 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset)
u32 mask;
bank = container_of(chip, struct gpio_bank, chip);
- mask = (1 << offset);
+ mask = (BIT(offset));
if (gpio_is_input(bank, mask))
return _get_gpio_datain(bank, offset);
@@ -1081,10 +1088,12 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-static void omap_gpio_chip_init(struct gpio_bank *bank)
+static int omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
static int gpio;
+ int irq_base = 0;
+ int ret;
/*
* REVISIT eventually switch from OMAP-specific gpio structs
@@ -1092,12 +1101,12 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
*/
bank->chip.request = omap_gpio_request;
bank->chip.free = omap_gpio_free;
+ bank->chip.get_direction = gpio_get_direction;
bank->chip.direction_input = gpio_input;
bank->chip.get = gpio_get;
bank->chip.direction_output = gpio_output;
bank->chip.set_debounce = gpio_debounce;
bank->chip.set = gpio_set;
- bank->chip.to_irq = omap_gpio_to_irq;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1110,22 +1119,48 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
}
bank->chip.ngpio = bank->width;
- gpiochip_add(&bank->chip);
+ ret = gpiochip_add(&bank->chip);
+ if (ret) {
+ dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
+ return ret;
+ }
+
+#ifdef CONFIG_ARCH_OMAP1
+ /*
+ * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
+ * irq_alloc_descs() since a base IRQ offset will no longer be needed.
+ */
+ irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+ if (irq_base < 0) {
+ dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
+ return -ENODEV;
+ }
+#endif
+
+ ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip,
+ irq_base, gpio_irq_handler,
+ IRQ_TYPE_NONE);
+
+ if (ret) {
+ dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
+ ret = gpiochip_remove(&bank->chip);
+ return -ENODEV;
+ }
+
+ gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip,
+ bank->irq, gpio_irq_handler);
for (j = 0; j < bank->width; j++) {
- int irq = irq_create_mapping(bank->domain, j);
+ int irq = irq_find_mapping(bank->chip.irqdomain, j);
irq_set_lockdep_class(irq, &gpio_lock_class);
- irq_set_chip_data(irq, bank);
if (bank->is_mpuio) {
omap_mpuio_alloc_gc(bank, irq, bank->width);
- } else {
- irq_set_chip_and_handler(irq, &gpio_irq_chip,
- handle_simple_irq);
- set_irq_flags(irq, IRQF_VALID);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ set_irq_flags(irq, 0);
}
}
- irq_set_chained_handler(bank->irq, gpio_irq_handler);
- irq_set_handler_data(bank->irq, bank);
+
+ return 0;
}
static const struct of_device_id omap_gpio_match[];
@@ -1138,9 +1173,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
const struct omap_gpio_platform_data *pdata;
struct resource *res;
struct gpio_bank *bank;
-#ifdef CONFIG_ARCH_OMAP1
- int irq_base;
-#endif
+ int ret;
match = of_match_device(of_match_ptr(omap_gpio_match), dev);
@@ -1162,6 +1195,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->irq = res->start;
bank->dev = dev;
+ bank->chip.dev = dev;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
@@ -1182,29 +1216,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
-#ifdef CONFIG_ARCH_OMAP1
- /*
- * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
- * irq_alloc_descs() and irq_domain_add_legacy() and just use a
- * linear IRQ domain mapping for all OMAP platforms.
- */
- irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
- if (irq_base < 0) {
- dev_err(dev, "Couldn't allocate IRQ numbers\n");
- return -ENODEV;
- }
-
- bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
- 0, &irq_domain_simple_ops, NULL);
-#else
- bank->domain = irq_domain_add_linear(node, bank->width,
- &irq_domain_simple_ops, NULL);
-#endif
- if (!bank->domain) {
- dev_err(dev, "Couldn't register an IRQ domain\n");
- return -ENODEV;
- }
-
if (bank->regs->set_dataout && bank->regs->clr_dataout)
bank->set_dataout = _set_gpio_dataout_reg;
else
@@ -1216,7 +1227,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bank->base = devm_ioremap_resource(dev, res);
if (IS_ERR(bank->base)) {
- irq_domain_remove(bank->domain);
+ irq_domain_remove(bank->chip.irqdomain);
return PTR_ERR(bank->base);
}
@@ -1230,7 +1241,11 @@ static int omap_gpio_probe(struct platform_device *pdev)
mpuio_init(bank);
omap_gpio_mod_init(bank);
- omap_gpio_chip_init(bank);
+
+ ret = omap_gpio_chip_init(bank);
+ if (ret)
+ return ret;
+
omap_gpio_show_rev(bank);
pm_runtime_put(bank->dev);
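The omap changes above drop the driver's private irq_domain in favour of the gpiolib irqchip helpers: gpiochip_irqchip_add() sets up the domain behind chip->irqdomain, and gpiochip_set_chained_irqchip() wires the bank's parent interrupt to the chained flow handler. A rough sketch of the pattern, assuming an already-populated my_chip and my_irq_chip:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

/*
 * Sketch only: my_chip, my_irq_chip, my_handler and parent_irq are
 * assumed to exist; error handling trimmed to the essentials.
 */
static int my_irqchip_setup(struct gpio_chip *my_chip,
			    struct irq_chip *my_irq_chip,
			    irq_flow_handler_t my_handler,
			    unsigned int parent_irq)
{
	int ret;

	ret = gpiochip_add(my_chip);
	if (ret)
		return ret;

	/* Creates the irq domain and the per-line Linux IRQ mappings. */
	ret = gpiochip_irqchip_add(my_chip, my_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* Route the bank's upstream interrupt into the chained handler. */
	gpiochip_set_chained_irqchip(my_chip, my_irq_chip,
				     parent_irq, my_handler);
	return 0;
}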
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index da9d33252e56..86bdbe362068 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -148,7 +148,7 @@ static const struct palmas_device_data tps80036_dev_data = {
.ngpio = 16,
};
-static struct of_device_id of_palmas_gpio_match[] = {
+static const struct of_device_id of_palmas_gpio_match[] = {
{ .compatible = "ti,palmas-gpio", .data = &palmas_dev_data,},
{ .compatible = "ti,tps65913-gpio", .data = &palmas_dev_data,},
{ .compatible = "ti,tps65914-gpio", .data = &palmas_dev_data,},
@@ -173,10 +173,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio = devm_kzalloc(&pdev->dev,
sizeof(*palmas_gpio), GFP_KERNEL);
- if (!palmas_gpio) {
- dev_err(&pdev->dev, "Could not allocate palmas_gpio\n");
+ if (!palmas_gpio)
return -ENOMEM;
- }
palmas_gpio->palmas = palmas;
palmas_gpio->gpio_chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d550d8e58705..e721a37c3473 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -15,8 +15,6 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>
#include <linux/slab.h>
@@ -91,7 +89,6 @@ struct pca953x_chip {
u8 irq_stat[MAX_BANK];
u8 irq_trig_raise[MAX_BANK];
u8 irq_trig_fall[MAX_BANK];
- struct irq_domain *domain;
#endif
struct i2c_client *client;
@@ -100,6 +97,11 @@ struct pca953x_chip {
int chip_type;
};
+static inline struct pca953x_chip *to_pca(struct gpio_chip *gc)
+{
+ return container_of(gc, struct pca953x_chip, gpio_chip);
+}
+
static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
int off)
{
@@ -202,12 +204,10 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
- struct pca953x_chip *chip;
+ struct pca953x_chip *chip = to_pca(gc);
u8 reg_val;
int ret, offset = 0;
- chip = container_of(gc, struct pca953x_chip, gpio_chip);
-
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
@@ -233,12 +233,10 @@ exit:
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
- struct pca953x_chip *chip;
+ struct pca953x_chip *chip = to_pca(gc);
u8 reg_val;
int ret, offset = 0;
- chip = container_of(gc, struct pca953x_chip, gpio_chip);
-
mutex_lock(&chip->i2c_lock);
/* set output level */
if (val)
@@ -285,12 +283,10 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
- struct pca953x_chip *chip;
+ struct pca953x_chip *chip = to_pca(gc);
u32 reg_val;
int ret, offset = 0;
- chip = container_of(gc, struct pca953x_chip, gpio_chip);
-
mutex_lock(&chip->i2c_lock);
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -315,12 +311,10 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
- struct pca953x_chip *chip;
+ struct pca953x_chip *chip = to_pca(gc);
u8 reg_val;
int ret, offset = 0;
- chip = container_of(gc, struct pca953x_chip, gpio_chip);
-
mutex_lock(&chip->i2c_lock);
if (val)
reg_val = chip->reg_output[off / BANK_SZ]
@@ -367,38 +361,34 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
}
#ifdef CONFIG_GPIO_PCA953X_IRQ
-static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
-{
- struct pca953x_chip *chip;
-
- chip = container_of(gc, struct pca953x_chip, gpio_chip);
- return irq_create_mapping(chip->domain, off);
-}
-
static void pca953x_irq_mask(struct irq_data *d)
{
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pca953x_chip *chip = to_pca(gc);
chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ));
}
static void pca953x_irq_unmask(struct irq_data *d)
{
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pca953x_chip *chip = to_pca(gc);
chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ);
}
static void pca953x_irq_bus_lock(struct irq_data *d)
{
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pca953x_chip *chip = to_pca(gc);
mutex_lock(&chip->irq_lock);
}
static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pca953x_chip *chip = to_pca(gc);
u8 new_irqs;
int level, i;
@@ -420,7 +410,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pca953x_chip *chip = to_pca(gc);
int bank_nb = d->hwirq / BANK_SZ;
u8 mask = 1 << (d->hwirq % BANK_SZ);
@@ -503,44 +494,25 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
struct pca953x_chip *chip = devid;
u8 pending[MAX_BANK];
u8 level;
+ unsigned nhandled = 0;
int i;
if (!pca953x_irq_pending(chip, pending))
- return IRQ_HANDLED;
+ return IRQ_NONE;
for (i = 0; i < NBANK(chip); i++) {
while (pending[i]) {
level = __ffs(pending[i]);
- handle_nested_irq(irq_find_mapping(chip->domain,
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
level + (BANK_SZ * i)));
pending[i] &= ~(1 << level);
+ nhandled++;
}
}
- return IRQ_HANDLED;
+ return (nhandled > 0) ? IRQ_HANDLED : IRQ_NONE;
}
-static int pca953x_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip_data(irq, d->host_data);
- irq_set_chip(irq, &pca953x_irq_chip);
- irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
-
- return 0;
-}
-
-static const struct irq_domain_ops pca953x_irq_simple_ops = {
- .map = pca953x_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id,
int irq_base)
@@ -572,19 +544,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
chip->irq_stat[i] &= chip->reg_direction[i];
mutex_init(&chip->irq_lock);
- chip->domain = irq_domain_add_simple(client->dev.of_node,
- chip->gpio_chip.ngpio,
- irq_base,
- &pca953x_irq_simple_ops,
- chip);
- if (!chip->domain)
- return -ENODEV;
-
ret = devm_request_threaded_irq(&client->dev,
client->irq,
NULL,
pca953x_irq_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_SHARED,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -592,7 +557,16 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return ret;
}
- chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
+ ret = gpiochip_irqchip_add(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&client->dev,
+ "could not connect irqchip to gpiochip\n");
+ return ret;
+ }
}
return 0;
@@ -756,11 +730,11 @@ static int pca953x_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = pca953x_irq_setup(chip, id, irq_base);
+ ret = gpiochip_add(&chip->gpio_chip);
if (ret)
return ret;
- ret = gpiochip_add(&chip->gpio_chip);
+ ret = pca953x_irq_setup(chip, id, irq_base);
if (ret)
return ret;
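Two behavioural points in the pca953x hunks above go together: once the interrupt line is requested with IRQF_SHARED, the handler must return IRQ_NONE whenever none of the pending bits belonged to this chip, so the core can try the other devices sharing the line and still detect a stuck interrupt. A condensed sketch of that contract, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical device state and pending-check helper. */
struct my_expander { /* ... */ };
static bool my_pending(struct my_expander *chip, u8 *pending);

static irqreturn_t my_irq_handler(int irq, void *devid)
{
	struct my_expander *chip = devid;
	unsigned int nhandled = 0;
	u8 pending;

	if (!my_pending(chip, &pending))
		return IRQ_NONE;	/* not ours: let sharers run */

	while (pending) {
		/* dispatch the nested irq for one bit ... */
		pending &= pending - 1;	/* clear lowest set bit */
		nhandled++;
	}
	return nhandled ? IRQ_HANDLED : IRQ_NONE;
}

The probe reordering in the last hunk follows from the same conversion: gpiochip_add() must run before pca953x_irq_setup(), since gpiochip_irqchip_add() needs a registered chip.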
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 82735822bc9d..27b46751ea7e 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -262,7 +262,7 @@ static int pcf857x_irq_domain_init(struct pcf857x *gpio,
/* enable real irq */
status = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf857x_irq, IRQF_ONESHOT |
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED,
dev_name(&client->dev), gpio);
if (status)
@@ -319,7 +319,7 @@ static int pcf857x_probe(struct i2c_client *client,
status = pcf857x_irq_domain_init(gpio, client);
if (status < 0) {
dev_err(&client->dev, "irq_domain init failed\n");
- goto fail;
+ goto fail_irq_domain;
}
}
@@ -414,12 +414,13 @@ static int pcf857x_probe(struct i2c_client *client,
return 0;
fail:
- dev_dbg(&client->dev, "probe error %d for '%s'\n",
- status, client->name);
-
if (client->irq)
pcf857x_irq_domain_cleanup(gpio);
+fail_irq_domain:
+ dev_dbg(&client->dev, "probe error %d for '%s'\n",
+ status, client->name);
+
return status;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 83a156397474..d6eac9b17db9 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -20,6 +20,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/slab.h>
#define PCH_EDGE_FALLING 0
#define PCH_EDGE_RISING BIT(0)
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index b0f475243cef..84b49cfb81a8 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -17,7 +17,6 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
-#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
@@ -88,7 +87,7 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
spin_lock_irqsave(&chip->lock, flags);
gpiodir = readb(chip->base + GPIODIR);
- gpiodir &= ~(1 << offset);
+ gpiodir &= ~(BIT(offset));
writeb(gpiodir, chip->base + GPIODIR);
spin_unlock_irqrestore(&chip->lock, flags);
@@ -106,16 +105,16 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
return -EINVAL;
spin_lock_irqsave(&chip->lock, flags);
- writeb(!!value << offset, chip->base + (1 << (offset + 2)));
+ writeb(!!value << offset, chip->base + (BIT(offset + 2)));
gpiodir = readb(chip->base + GPIODIR);
- gpiodir |= 1 << offset;
+ gpiodir |= BIT(offset);
writeb(gpiodir, chip->base + GPIODIR);
/*
* gpio value is set again, because pl061 doesn't allow to set value of
* a gpio pin before configuring it in OUT mode.
*/
- writeb(!!value << offset, chip->base + (1 << (offset + 2)));
+ writeb(!!value << offset, chip->base + (BIT(offset + 2)));
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
@@ -125,14 +124,14 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- return !!readb(chip->base + (1 << (offset + 2)));
+ return !!readb(chip->base + (BIT(offset + 2)));
}
static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- writeb(!!value << offset, chip->base + (1 << (offset + 2)));
+ writeb(!!value << offset, chip->base + (BIT(offset + 2)));
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
@@ -207,7 +206,7 @@ static void pl061_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR);
+ u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
spin_lock(&chip->lock);
@@ -220,7 +219,7 @@ static void pl061_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR);
+ u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
spin_lock(&chip->lock);
@@ -302,9 +301,9 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
- if (pdata->directions & (1 << i))
+ if (pdata->directions & (BIT(i)))
pl061_direction_output(&chip->gc, i,
- pdata->values & (1 << i));
+ pdata->values & (BIT(i)));
else
pl061_direction_input(&chip->gc, i);
}
@@ -331,7 +330,7 @@ static int pl061_suspend(struct device *dev)
chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (1 << offset))
+ if (chip->csave_regs.gpio_dir & (BIT(offset)))
chip->csave_regs.gpio_data |=
pl061_get_value(&chip->gc, offset) << offset;
}
@@ -345,10 +344,10 @@ static int pl061_resume(struct device *dev)
int offset;
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (1 << offset))
+ if (chip->csave_regs.gpio_dir & (BIT(offset)))
pl061_direction_output(&chip->gc, offset,
chip->csave_regs.gpio_data &
- (1 << offset));
+ (BIT(offset)));
else
pl061_direction_input(&chip->gc, offset);
}
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 9b423173ab50..562b0c4d9cc8 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -119,10 +119,8 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio),
GFP_KERNEL);
- if (!rc5t583_gpio) {
- dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed");
+ if (!rc5t583_gpio)
return -ENOMEM;
- }
rc5t583_gpio->gpio_chip.label = "gpio-rc5t583",
rc5t583_gpio->gpio_chip.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 03c91482432c..0c9f803fc1ac 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -362,7 +363,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
if (!p) {
- dev_err(dev, "failed to allocate driver data\n");
ret = -ENOMEM;
goto err0;
}
@@ -377,6 +377,9 @@ static int gpio_rcar_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -460,6 +463,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
err1:
irq_domain_remove(p->irq_domain);
err0:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -473,6 +478,8 @@ static int gpio_rcar_remove(struct platform_device *pdev)
return ret;
irq_domain_remove(p->irq_domain);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
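The gpio-rcar hunks bracket the register window with runtime PM: pm_runtime_enable() then pm_runtime_get_sync() in probe power the block up before any register access, with the matching put/disable on both the probe error path and remove. A sketch of the balanced calls:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);	/* power up the block */
	if (ret < 0)
		goto err;

	/* ... touch registers, register the chip ... */
	return 0;
err:
	/* mirror the driver's err0 path: drop the reference, then disable */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}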
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 88577c3272a5..9fa7e53331c9 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -141,17 +141,15 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
- if (!rdc321x_gpio_dev) {
- dev_err(&pdev->dev, "failed to allocate private data\n");
+ rdc321x_gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct rdc321x_gpio),
+ GFP_KERNEL);
+ if (!rdc321x_gpio_dev)
return -ENOMEM;
- }
r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
if (!r) {
dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
- err = -ENODEV;
- goto out_free;
+ return -ENODEV;
}
spin_lock_init(&rdc321x_gpio_dev->lock);
@@ -162,8 +160,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
if (!r) {
dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
- err = -ENODEV;
- goto out_free;
+ return -ENODEV;
}
rdc321x_gpio_dev->reg2_ctrl_base = r->start;
@@ -187,21 +184,17 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->reg1_data_base,
&rdc321x_gpio_dev->data_reg[0]);
if (err)
- goto out_free;
+ return err;
err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
rdc321x_gpio_dev->reg2_data_base,
&rdc321x_gpio_dev->data_reg[1]);
if (err)
- goto out_free;
+ return err;
dev_info(&pdev->dev, "registering %d GPIOs\n",
rdc321x_gpio_dev->chip.ngpio);
return gpiochip_add(&rdc321x_gpio_dev->chip);
-
-out_free:
- kfree(rdc321x_gpio_dev);
- return err;
}
static int rdc321x_gpio_remove(struct platform_device *pdev)
@@ -213,8 +206,6 @@ static int rdc321x_gpio_remove(struct platform_device *pdev)
if (ret)
dev_err(&pdev->dev, "failed to unregister chip\n");
- kfree(rdc321x_gpio_dev);
-
return ret;
}
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 5af65719b95d..a9b1cd16c848 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -97,8 +97,6 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc,
u8 curr_dirs;
unsigned short offset, bit;
- sch_gpio_core_set(gc, gpio_num, val);
-
spin_lock(&gpio_lock);
offset = CGIO + gpio_num / 8;
@@ -109,6 +107,17 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc,
outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
spin_unlock(&gpio_lock);
+
+ /*
+ * according to the datasheet, writing to the level register has no
+ * effect when GPIO is programmed as input.
+ * Actually the the level register is read-only when configured as input.
+ * Thus presetting the output level before switching to output is _NOT_ possible.
+ * Hence we set the level after configuring the GPIO as output.
+ * But we cannot prevent a short low pulse if direction is set to high
+ * and an external pull-up is connected.
+ */
+ sch_gpio_core_set(gc, gpio_num, val);
return 0;
}
@@ -178,8 +187,6 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc,
u8 curr_dirs;
unsigned short offset, bit;
- sch_gpio_resume_set(gc, gpio_num, val);
-
offset = RGIO + gpio_num / 8;
bit = gpio_num % 8;
@@ -190,6 +197,17 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc,
outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
spin_unlock(&gpio_lock);
+
+ /*
+ * According to the datasheet, writing to the level register has no
+ * effect when the GPIO is programmed as input.
+ * In fact, the level register is read-only when configured as input,
+ * so presetting the output level before switching to output is _NOT_
+ * possible. Hence we set the level after configuring the GPIO as
+ * output. But we cannot prevent a short low pulse when a line with an
+ * external pull-up is switched to output high.
+ */
+ sch_gpio_resume_set(gc, gpio_num, val);
return 0;
}
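Both sch_gpio direction_out paths were reordered for the reason the new comment gives: the level register is read-only while the pin is an input, so the level write only sticks once the direction bit has flipped to output. A stub-register sketch of the required ordering, with hypothetical helpers standing in for the SCH I/O ports:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical 1-bit registers standing in for the SCH I/O ports. */
static uint8_t dir_is_input = 1, level;

static void set_level(uint8_t val)
{
	if (dir_is_input)
		return;		/* read-only while configured as input */
	level = val;
}

int main(void)
{
	set_level(1);		/* lost: pin is still an input */
	dir_is_input = 0;	/* switch to output first ... */
	set_level(1);		/* ... then the level write sticks */
	printf("level = %u\n", level);	/* prints 1 */
	return 0;
}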
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 0357387b3645..f942b80ee403 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -327,14 +327,22 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
if (err)
return err;
- /* Check device ID. We currently know about:
- * SCH3112 (0x7c), SCH3114 (0x7d), and SCH3116 (0x7f). */
+ /* Check device ID. */
reg = sch311x_sio_inb(sio_config_port, 0x20);
- if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
+ switch (reg) {
+ case 0x7c: /* SCH3112 */
+ dev_id = 2;
+ break;
+ case 0x7d: /* SCH3114 */
+ dev_id = 4;
+ break;
+ case 0x7f: /* SCH3116 */
+ dev_id = 6;
+ break;
+ default:
err = -ENODEV;
goto exit;
}
- dev_id = reg == 0x7c ? 2 : reg == 0x7d ? 4 : 6;
/* Select logical device A (runtime registers) */
sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 30bcc539425d..353263c85d26 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -129,10 +129,8 @@ static int spics_gpio_probe(struct platform_device *pdev)
int ret;
spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
- if (!spics) {
- dev_err(&pdev->dev, "memory allocation fail\n");
+ if (!spics)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spics->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index 13d73fb2b5e1..b51ca9f5c140 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include <linux/i2c/sx150x.h>
#define NO_UPDATE_PENDING -1
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 1019320984d7..51f7cbd9ff71 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -12,8 +12,6 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/mfd/tc3589x.h>
@@ -31,10 +29,6 @@ struct tc3589x_gpio {
struct tc3589x *tc3589x;
struct device *dev;
struct mutex irq_lock;
- struct irq_domain *domain;
-
- int irq_base;
-
/* Caches of interrupt control registers for bus_lock */
u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
@@ -95,30 +89,6 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0);
}
-/**
- * tc3589x_gpio_irq_get_irq(): Map a hardware IRQ on a chip to a Linux IRQ
- *
- * @tc3589x_gpio: tc3589x_gpio_irq controller to operate on.
- * @irq: index of the hardware interrupt requested in the chip IRQs
- *
- * Useful for drivers to request their own IRQs.
- */
-static int tc3589x_gpio_irq_get_irq(struct tc3589x_gpio *tc3589x_gpio,
- int hwirq)
-{
- if (!tc3589x_gpio)
- return -EINVAL;
-
- return irq_create_mapping(tc3589x_gpio->domain, hwirq);
-}
-
-static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
-
- return tc3589x_gpio_irq_get_irq(tc3589x_gpio, offset);
-}
-
static struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
@@ -126,13 +96,13 @@ static struct gpio_chip template_chip = {
.get = tc3589x_gpio_get,
.direction_output = tc3589x_gpio_direction_output,
.set = tc3589x_gpio_set,
- .to_irq = tc3589x_gpio_to_irq,
.can_sleep = true,
};
static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -159,14 +129,16 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static void tc3589x_gpio_irq_lock(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
mutex_lock(&tc3589x_gpio->irq_lock);
}
static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
static const u8 regmap[] = {
[REG_IBE] = TC3589x_GPIOIBE0,
@@ -194,7 +166,8 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
static void tc3589x_gpio_irq_mask(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -204,7 +177,8 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -242,7 +216,8 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = i * 8 + bit;
- int irq = tc3589x_gpio_irq_get_irq(tc3589x_gpio, line);
+ int irq = irq_find_mapping(tc3589x_gpio->chip.irqdomain,
+ line);
handle_nested_irq(irq);
stat &= ~(1 << bit);
@@ -254,61 +229,6 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct tc3589x *tc3589x_gpio = d->host_data;
-
- irq_set_chip_data(irq, tc3589x_gpio);
- irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
-
- return 0;
-}
-
-static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
-{
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
-}
-
-static struct irq_domain_ops tc3589x_irq_ops = {
- .map = tc3589x_gpio_irq_map,
- .unmap = tc3589x_gpio_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
- struct device_node *np)
-{
- int base = tc3589x_gpio->irq_base;
-
- /*
- * If this results in a linear domain, irq_create_mapping() will
- * take care of allocating IRQ descriptors at runtime. When a base
- * is provided, the IRQ descriptors will be allocated when the
- * domain is instantiated.
- */
- tc3589x_gpio->domain = irq_domain_add_simple(np,
- tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops,
- tc3589x_gpio);
- if (!tc3589x_gpio->domain) {
- dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n");
- return -ENOSYS;
- }
-
- return 0;
-}
-
static int tc3589x_gpio_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
@@ -329,7 +249,8 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- tc3589x_gpio = kzalloc(sizeof(struct tc3589x_gpio), GFP_KERNEL);
+ tc3589x_gpio = devm_kzalloc(&pdev->dev, sizeof(struct tc3589x_gpio),
+ GFP_KERNEL);
if (!tc3589x_gpio)
return -ENOMEM;
@@ -347,30 +268,36 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.of_node = np;
#endif
- tc3589x_gpio->irq_base = tc3589x->irq_base ?
- tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0;
-
/* Bring the GPIO module out of reset */
ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL,
TC3589x_RSTCTRL_GPIRST, 0);
if (ret < 0)
- goto out_free;
-
- ret = tc3589x_gpio_irq_init(tc3589x_gpio, np);
- if (ret)
- goto out_free;
+ return ret;
- ret = request_threaded_irq(irq, NULL, tc3589x_gpio_irq, IRQF_ONESHOT,
- "tc3589x-gpio", tc3589x_gpio);
+ ret = devm_request_threaded_irq(&pdev->dev,
+ irq, NULL, tc3589x_gpio_irq,
+ IRQF_ONESHOT, "tc3589x-gpio",
+ tc3589x_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_free;
+ return ret;
}
ret = gpiochip_add(&tc3589x_gpio->chip);
if (ret) {
dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
- goto out_freeirq;
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&tc3589x_gpio->chip,
+ &tc3589x_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not connect irqchip to gpiochip\n");
+ return ret;
}
if (pdata && pdata->setup)
@@ -379,12 +306,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tc3589x_gpio);
return 0;
-
-out_freeirq:
- free_irq(irq, tc3589x_gpio);
-out_free:
- kfree(tc3589x_gpio);
- return ret;
}
static int tc3589x_gpio_remove(struct platform_device *pdev)
@@ -392,7 +313,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev)
struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio;
- int irq = platform_get_irq(pdev, 0);
int ret;
if (pdata && pdata->remove)
@@ -405,10 +325,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev)
return ret;
}
- free_irq(irq, tc3589x_gpio);
-
- kfree(tc3589x_gpio);
-
return 0;
}
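With gpiochip_irqchip_add() in charge, the chip data installed on each interrupt is the struct gpio_chip itself, which is why every tc3589x irq callback above now recovers its private state via container_of(). A minimal sketch of that recovery, assuming the embedded-chip layout used here:

#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/kernel.h>

struct my_gpio {
	struct gpio_chip chip;	/* embedded, so container_of() works */
	/* private fields ... */
};

static void my_irq_mask(struct irq_data *d)
{
	/* gpiolib stored the gpio_chip as the irq chip data */
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct my_gpio *priv = container_of(gc, struct my_gpio, chip);

	/* mask bit d->hwirq in priv's register cache ... */
	(void)priv;
}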
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 2b49f878b56c..4e8fb8261a87 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -408,7 +408,7 @@ static struct tegra_gpio_soc_config tegra30_gpio_config = {
.upper_offset = 0x80,
};
-static struct of_device_id tegra_gpio_of_match[] = {
+static const struct of_device_id tegra_gpio_of_match[] = {
{ .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
{ },
@@ -458,10 +458,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_banks = devm_kzalloc(&pdev->dev,
tegra_gpio_bank_count * sizeof(*tegra_gpio_banks),
GFP_KERNEL);
- if (!tegra_gpio_banks) {
- dev_err(&pdev->dev, "Couldn't allocate bank structure\n");
+ if (!tegra_gpio_banks)
return -ENODEV;
- }
irq_domain = irq_domain_add_linear(pdev->dev.of_node,
tegra_gpio_chip.ngpio,
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index f9a8fbde108e..efc7c129016d 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -224,6 +224,7 @@ static struct irq_chip timbgpio_irqchip = {
static int timbgpio_probe(struct platform_device *pdev)
{
int err, i;
+ struct device *dev = &pdev->dev;
struct gpio_chip *gc;
struct timbgpio *tgpio;
struct resource *iomem;
@@ -231,35 +232,35 @@ static int timbgpio_probe(struct platform_device *pdev)
int irq = platform_get_irq(pdev, 0);
if (!pdata || pdata->nr_pins > 32) {
- err = -EINVAL;
- goto err_mem;
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
}
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
- err = -EINVAL;
- goto err_mem;
+ dev_err(dev, "Unable to get resource\n");
+ return -EINVAL;
}
- tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
+ tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL);
if (!tgpio) {
- err = -EINVAL;
- goto err_mem;
+ dev_err(dev, "Memory alloc failed\n");
+ return -ENOMEM;
}
tgpio->irq_base = pdata->irq_base;
spin_lock_init(&tgpio->lock);
- if (!request_mem_region(iomem->start, resource_size(iomem),
- DRIVER_NAME)) {
- err = -EBUSY;
- goto err_request;
+ if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem),
+ DRIVER_NAME)) {
+ dev_err(dev, "Region already claimed\n");
+ return -EBUSY;
}
- tgpio->membase = ioremap(iomem->start, resource_size(iomem));
+ tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem));
if (!tgpio->membase) {
- err = -ENOMEM;
- goto err_ioremap;
+ dev_err(dev, "Cannot ioremap\n");
+ return -ENOMEM;
}
gc = &tgpio->gpio;
@@ -279,7 +280,7 @@ static int timbgpio_probe(struct platform_device *pdev)
err = gpiochip_add(gc);
if (err)
- goto err_chipadd;
+ return err;
platform_set_drvdata(pdev, tgpio);
@@ -302,17 +303,6 @@ static int timbgpio_probe(struct platform_device *pdev)
irq_set_chained_handler(irq, timbgpio_irq);
return 0;
-
-err_chipadd:
- iounmap(tgpio->membase);
-err_ioremap:
- release_mem_region(iomem->start, resource_size(iomem));
-err_request:
- kfree(tgpio);
-err_mem:
- printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
-
- return err;
}
static int timbgpio_remove(struct platform_device *pdev)
@@ -320,7 +310,6 @@ static int timbgpio_remove(struct platform_device *pdev)
int err;
struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timbgpio *tgpio = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (irq >= 0 && tgpio->irq_base > 0) {
@@ -338,10 +327,6 @@ static int timbgpio_remove(struct platform_device *pdev)
if (err)
printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
- iounmap(tgpio->membase);
- release_mem_region(iomem->start, resource_size(iomem));
- kfree(tgpio);
-
return 0;
}
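The timberdale conversion above replaces a manual unwind ladder (iounmap/release_mem_region/kfree behind goto labels) with device-managed devm_* allocations, which the driver core releases automatically when probe fails or the device is unbound. A minimal sketch of the same pattern, assuming a hypothetical foo platform driver; note that devm_ioremap_resource() combines the request_mem_region and ioremap steps the patch keeps separate:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/io.h>

	struct foo_priv {
		void __iomem *membase;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct resource *iomem;
		struct foo_priv *priv;

		/* freed automatically on probe failure or device removal */
		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* claims the region and maps it; both undone by devres */
		priv->membase = devm_ioremap_resource(dev, iomem);
		if (IS_ERR(priv->membase))
			return PTR_ERR(priv->membase);

		platform_set_drvdata(pdev, priv);
		return 0;
	}

With devres the matching remove path shrinks to the chip teardown alone, which is exactly what happens to timbgpio_remove() above.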
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 8994dfa13491..a69fbea41253 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -97,10 +97,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
pdata = dev_get_platdata(pdev->dev.parent);
tps6586x_gpio = devm_kzalloc(&pdev->dev,
sizeof(*tps6586x_gpio), GFP_KERNEL);
- if (!tps6586x_gpio) {
- dev_err(&pdev->dev, "Could not allocate tps6586x_gpio\n");
+ if (!tps6586x_gpio)
return -ENOMEM;
- }
tps6586x_gpio->parent = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index b6e818e68007..e2f8cda235ea 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -123,10 +123,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio = devm_kzalloc(&pdev->dev,
sizeof(*tps65910_gpio), GFP_KERNEL);
- if (!tps65910_gpio) {
- dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+ if (!tps65910_gpio)
return -ENOMEM;
- }
tps65910_gpio->tps65910 = tps65910;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 792a05ad4649..12481867daf1 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -289,7 +289,7 @@ static int xgpio_of_probe(struct device_node *np)
return 0;
}
-static struct of_device_id xgpio_of_match[] = {
+static const struct of_device_id xgpio_of_match[] = {
{ .compatible = "xlnx,xps-gpio-1.00.a", },
{ /* end of list */ },
};
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 9bf5034b6cdb..54e54e4cc6c4 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -81,9 +81,15 @@ static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin,
static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
{
struct zevio_gpio *controller = to_zevio_gpio(chip);
+ u32 val, dir;
- /* Only reading allowed, so no spinlock needed */
- u32 val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT);
+ spin_lock(&controller->lock);
+ dir = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
+ if (dir & BIT(ZEVIO_GPIO_BIT(pin)))
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT);
+ else
+ val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
+ spin_unlock(&controller->lock);
return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
}
@@ -172,10 +178,8 @@ static int zevio_gpio_probe(struct platform_device *pdev)
int status, i;
controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL);
- if (!controller) {
- dev_err(&pdev->dev, "not enough free memory\n");
+ if (!controller)
return -ENOMEM;
- }
/* Copy our reference */
controller->chip.gc = zevio_gpio_chip;
@@ -198,7 +202,7 @@ static int zevio_gpio_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id zevio_gpio_of_match[] = {
+static const struct of_device_id zevio_gpio_of_match[] = {
{ .compatible = "lsi,zevio-gpio", },
{ },
};
@@ -209,7 +213,7 @@ static struct platform_driver zevio_gpio_driver = {
.driver = {
.name = "gpio-zevio",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(zevio_gpio_of_match),
+ .of_match_table = zevio_gpio_of_match,
},
.probe = zevio_gpio_probe,
};
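Dropping of_match_ptr() here is deliberate: zevio is an OF-only driver, and the wrapper (from <linux/of.h>) exists to hide the match table on non-OF builds, where it would otherwise be defined but unreferenced:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif

With CONFIG_OF guaranteed, referencing the table directly is equivalent and avoids carrying a dead table on configurations that cannot exist.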
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 401add28933f..4a987917c186 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -449,9 +449,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
mutex_unlock(&achip->conn_lock);
if (function == ACPI_WRITE)
- gpiod_set_raw_value(desc, !!((1 << i) & *value));
+ gpiod_set_raw_value_cansleep(desc,
+ !!((1 << i) & *value));
else
- *value |= (u64)gpiod_get_raw_value(desc) << i;
+ *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
}
out:
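The ACPI GPIO operation region handler runs in a context that is allowed to sleep, so the hunk above switches to the _cansleep accessors; the plain value accessors WARN when the backing gpio_chip sits behind a sleeping bus such as I2C or SPI. A small sketch of the distinction, using the non-raw variants and a descriptor obtained elsewhere (the raw variants used in the patch differ only in ignoring the ACTIVE_LOW polarity flag):

	#include <linux/gpio/consumer.h>

	static void foo_pulse(struct gpio_desc *desc)
	{
		/* safe in atomic context, but WARNs on chips that sleep */
		gpiod_set_value(desc, 1);

		/* may sleep, so legal for any chip; process context only */
		gpiod_set_value_cansleep(desc, 0);
	}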
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2024d45e5503..af7e25c9a9ae 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
if (ret < 0)
return false;
- gg_data->out_gpio = gpio_to_desc(ret + gc->base);
+ gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
}
@@ -96,6 +96,20 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
}
EXPORT_SYMBOL(of_get_named_gpiod_flags);
+int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
+ int index, enum of_gpio_flags *flags)
+{
+ struct gpio_desc *desc;
+
+ desc = of_get_named_gpiod_flags(np, list_name, index, flags);
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ else
+ return desc_to_gpio(desc);
+}
+EXPORT_SYMBOL(of_get_named_gpio_flags);
+
/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f48817d97480..d9c9cb4665db 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1363,6 +1363,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler)
{
+ if (gpiochip->can_sleep) {
+ chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
+ return;
+ }
+
irq_set_chained_handler(parent_irq, parent_handler);
/*
* The parent irqchip is already using the chip_data for this
@@ -1372,6 +1377,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpiochip_irq_lock_class;
+
/**
* gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
* @d: the irqdomain used by this irqchip
@@ -1388,22 +1399,35 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
struct gpio_chip *chip = d->host_data;
irq_set_chip_data(irq, chip);
+ irq_set_lockdep_class(irq, &gpiochip_irq_lock_class);
irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
+ /* Chips that can sleep need nested thread handlers */
+ if (chip->can_sleep)
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
irq_set_noprobe(irq);
#endif
- irq_set_irq_type(irq, chip->irq_default_type);
+ /*
+ * No set-up of the hardware will happen if IRQ_TYPE_NONE
+ * is passed as default type.
+ */
+ if (chip->irq_default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, chip->irq_default_type);
return 0;
}
static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
+ struct gpio_chip *chip = d->host_data;
+
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
+ if (chip->can_sleep)
+ irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
@@ -1471,7 +1495,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @first_irq: if not dynamically assigned, the base (first) IRQ to
* allocate gpiochip irqs from
* @handler: the irq handler to use (often a predefined irq core function)
- * @type: the default type for IRQs on this irqchip
+ * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
+ * to have the core avoid setting up any default type in the hardware.
*
* This function closely associates a certain irqchip with a certain
* gpiochip, providing an irq domain to translate the local IRQs to
@@ -2571,22 +2596,27 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
mutex_unlock(&gpio_lookup_lock);
}
-#ifdef CONFIG_OF
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
{
+ static const char *suffixes[] = { "gpios", "gpio" };
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
struct gpio_desc *desc;
+ unsigned int i;
- if (con_id)
- snprintf(prop_name, 32, "%s-gpios", con_id);
- else
- snprintf(prop_name, 32, "gpios");
+ for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
+ if (con_id)
+ snprintf(prop_name, 32, "%s-%s", con_id, suffixes[i]);
+ else
+ snprintf(prop_name, 32, "%s", suffixes[i]);
- desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
- &of_flags);
+ desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
+ &of_flags);
+ if (!IS_ERR(desc))
+ break;
+ }
if (IS_ERR(desc))
return desc;
@@ -2596,14 +2626,6 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
return desc;
}
-#else
-static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
@@ -2701,7 +2723,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
}
/**
- * gpio_get - obtain a GPIO for a given GPIO function
+ * gpiod_get - obtain a GPIO for a given GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
*
@@ -2716,6 +2738,22 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id)
EXPORT_SYMBOL_GPL(gpiod_get);
/**
+ * gpiod_get_optional - obtain an optional GPIO for a given GPIO function
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ *
+ * This is equivalent to gpiod_get(), except that when no GPIO was assigned to
+ * the requested function it will return NULL. This is convenient for drivers
+ * that need to handle optional GPIOs.
+ */
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
+ const char *con_id)
+{
+ return gpiod_get_index_optional(dev, con_id, 0);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_optional);
+
+/**
* gpiod_get_index - obtain a GPIO from a multi-index GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
@@ -2778,6 +2816,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
+ * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO
+ * function
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ * @index: index of the GPIO to obtain in the consumer
+ *
+ * This is equivalent to gpiod_get_index(), except that when no GPIO with the
+ * specified index was assigned to the requested function it will return NULL.
+ * This is convenient for drivers that need to handle optional GPIOs.
+ */
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
+ const char *con_id,
+ unsigned int index)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_get_index(dev, con_id, index);
+ if (IS_ERR(desc)) {
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
+
+/**
* gpiod_put - dispose of a GPIO descriptor
* @desc: GPIO descriptor to dispose of
*
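gpiod_get_optional() and gpiod_get_index_optional(), added above, fold the common "this GPIO may legitimately be absent" pattern into the lookup itself: -ENOENT becomes NULL while every other error still propagates. A usage sketch, assuming a hypothetical "reset" consumer name (the two-argument gpiod_get() signature matches this tree):

	#include <linux/gpio/consumer.h>
	#include <linux/device.h>

	static int foo_setup_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		/* NULL when no reset GPIO is wired up at all */
		reset = gpiod_get_optional(dev, "reset");
		if (IS_ERR(reset))
			return PTR_ERR(reset);	/* real errors still fail */

		if (reset) {
			gpiod_direction_output(reset, 1);
			gpiod_set_value_cansleep(reset, 0);
		}
		return 0;
	}

Note also that of_find_gpio() above now tries both the "<con_id>-gpios" and legacy "<con_id>-gpio" property suffixes before giving up.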
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index cf092941a9fd..1a4103dd38df 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -15,6 +15,8 @@
#include <linux/err.h>
#include <linux/device.h>
+enum of_gpio_flags;
+
/**
* struct acpi_gpio_info - ACPI GPIO specific information
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
@@ -46,4 +48,7 @@ acpi_get_gpiod_by_index(struct device *dev, int index,
int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags);
+
#endif /* GPIOLIB_H */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
flush_workqueue(dev_priv->wq);
mutex_lock(&dev->struct_mutex);
- i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
#define WATCH_LISTS 0
#define WATCH_GTT 0
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
- int id;
- struct page **page_list;
- drm_dma_handle_t *handle;
- struct drm_i915_gem_object *cur_obj;
-};
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
struct drm_file *pin_filp;
/** for phy allocated objects */
- struct drm_i915_gem_phys_object *phys_obj;
+ drm_dma_handle_t *phys_handle;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- unsigned flags);
+ uint64_t flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
int min_size,
unsigned alignment,
unsigned cache_level,
+ unsigned long start,
+ unsigned long end,
unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
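PIN_OFFSET_BIAS packs a minimum-offset constraint into the pin flags word itself: the low bits stay boolean PIN_* flags while a page-aligned offset rides in the bits above the page mask, which is why the flags parameter widens from unsigned to uint64_t. A sketch of the encoding, reusing the macros defined above:

	#include <stdint.h>

	#define PIN_OFFSET_BIAS 0x8
	#define PIN_OFFSET_MASK (~4095)
	#define BATCH_OFFSET_BIAS (256*1024)

	static unsigned long pin_start(uint64_t flags)
	{
		/* minimum acceptable GTT offset; 0 when no bias requested */
		return flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	}

	/* pin_start(PIN_OFFSET_BIAS | BATCH_OFFSET_BIAS) == 256 KiB */

The binder then searches [start, end) instead of [0, end), and i915_vma_misplaced() treats an already-bound VMA that sits below the bias as misplaced so it gets rebound.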
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+ drm_dma_handle_t *phys = obj->phys_handle;
+
+ if (!phys)
+ return;
+
+ if (obj->madv == I915_MADV_WILLNEED) {
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = phys->vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(dst, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ vaddr += PAGE_SIZE;
+ }
+ i915_gem_chipset_flush(obj->base.dev);
+ }
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+ int align)
+{
+ drm_dma_handle_t *phys;
+ struct address_space *mapping;
+ char *vaddr;
+ int i;
+
+ if (obj->phys_handle) {
+ if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+ return -EBUSY;
+
+ return 0;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ if (obj->base.filp == NULL)
+ return -EINVAL;
+
+ /* create a new object */
+ phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+ if (!phys)
+ return -ENOMEM;
+
+ vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+ set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ return PTR_ERR(page);
+ }
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ vaddr += PAGE_SIZE;
+ }
+
+ obj->phys_handle = phys;
+ return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = obj->base.dev;
+ void *vaddr = obj->phys_handle->vaddr + args->offset;
+ char __user *user_data = to_user_ptr(args->data_ptr);
+
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
+
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
+
+ i915_gem_chipset_flush(dev);
+ return 0;
+}
+
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_obj) {
- ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ if (obj->phys_handle) {
+ ret = i915_gem_phys_pwrite(obj, args, file);
goto out;
}
@@ -3208,12 +3326,14 @@ static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- unsigned flags)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- size_t gtt_max =
+ unsigned long start =
+ flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ unsigned long end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->base.size > gtt_max) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+ if (obj->base.size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
- gtt_max);
+ end);
return ERR_PTR(-E2BIG);
}
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
- obj->cache_level, 0, gtt_max,
+ obj->cache_level,
+ start, end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, vm, size, alignment,
- obj->cache_level, flags);
+ obj->cache_level,
+ start, end,
+ flags);
if (ret == 0)
goto search_free;
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return ret;
}
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (alignment &&
+ vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- unsigned flags)
+ uint64_t flags)
{
struct i915_vma *vma;
int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if ((alignment &&
- vma->node.start & (alignment - 1)) ||
- (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+ if (i915_vma_misplaced(vma, alignment, flags)) {
WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
- flags & PIN_MAPPABLE,
+ !!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret;
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
+ i915_gem_object_detach_phys(obj);
+
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
- int ret;
-
- if (dev_priv->mm.phys_objs[id - 1] || !size)
- return 0;
-
- phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
- if (!phys_obj)
- return -ENOMEM;
-
- phys_obj->id = id;
-
- phys_obj->handle = drm_pci_alloc(dev, size, align);
- if (!phys_obj->handle) {
- ret = -ENOMEM;
- goto kfree_obj;
- }
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
- dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
- return 0;
-kfree_obj:
- kfree(phys_obj);
- return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
-
- if (!dev_priv->mm.phys_objs[id - 1])
- return;
-
- phys_obj = dev_priv->mm.phys_objs[id - 1];
- if (phys_obj->cur_obj) {
- i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
- }
-
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
- drm_pci_free(dev, phys_obj->handle);
- kfree(phys_obj);
- dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
- int i;
-
- for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
- i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- char *vaddr;
- int i;
- int page_count;
-
- if (!obj->phys_obj)
- return;
- vaddr = obj->phys_obj->handle->vaddr;
-
- page_count = obj->base.size / PAGE_SIZE;
- for (i = 0; i < page_count; i++) {
- struct page *page = shmem_read_mapping_page(mapping, i);
- if (!IS_ERR(page)) {
- char *dst = kmap_atomic(page);
- memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
- kunmap_atomic(dst);
-
- drm_clflush_pages(&page, 1);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
- }
- }
- i915_gem_chipset_flush(dev);
-
- obj->phys_obj->cur_obj = NULL;
- obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- int id,
- int align)
-{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
- int page_count;
- int i;
-
- if (id > I915_MAX_PHYS_OBJECT)
- return -EINVAL;
-
- if (obj->phys_obj) {
- if (obj->phys_obj->id == id)
- return 0;
- i915_gem_detach_phys_object(dev, obj);
- }
-
- /* create a new object */
- if (!dev_priv->mm.phys_objs[id - 1]) {
- ret = i915_gem_init_phys_object(dev, id,
- obj->base.size, align);
- if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n",
- id, obj->base.size);
- return ret;
- }
- }
-
- /* bind to the object */
- obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj->phys_obj->cur_obj = obj;
-
- page_count = obj->base.size / PAGE_SIZE;
-
- for (i = 0; i < page_count; i++) {
- struct page *page;
- char *dst, *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- src = kmap_atomic(page);
- dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src);
-
- mark_page_accessed(page);
- page_cache_release(page);
- }
-
- return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
-
- if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
- unsigned long unwritten;
-
- /* The physical object once assigned is fixed for the lifetime
- * of the obj, so we can safely drop the lock and continue
- * to access vaddr.
- */
- mutex_unlock(&dev->struct_mutex);
- unwritten = copy_from_user(vaddr, user_data, args->size);
- mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
- }
-
- i915_gem_chipset_flush(dev);
- return 0;
-}
-
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
+ unsigned long start, unsigned long end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
*/
INIT_LIST_HEAD(&unwind_list);
- if (flags & PIN_MAPPABLE) {
- BUG_ON(!i915_is_ggtt(vm));
+ if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
- alignment, cache_level, 0,
- dev_priv->gtt.mappable_end);
+ alignment, cache_level,
+ start, end);
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
struct eb_vmas {
struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence;
- unsigned flags;
+ uint64_t flags;
int ret;
flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool need_fence, need_mappable;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(vma);
+
+ WARN_ON((need_mappable || need_fence) &&
+ !i915_is_ggtt(vma->vm));
+
+ if (entry->alignment &&
+ vma->node.start & (entry->alignment - 1))
+ return true;
+
+ if (need_mappable && !obj->map_and_fenceable)
+ return true;
+
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+ vma->node.start < BATCH_OFFSET_BIAS)
+ return true;
+
+ return false;
+}
+
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(vma, vmas, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- bool need_fence, need_mappable;
-
- obj = vma->obj;
-
if (!drm_mm_node_allocated(&vma->node))
continue;
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(vma);
-
- WARN_ON((need_mappable || need_fence) &&
- !i915_is_ggtt(vma->vm));
-
- if ((entry->alignment &&
- vma->node.start & (entry->alignment - 1)) ||
- (need_mappable && !obj->map_and_fenceable))
+ if (eb_vma_misplaced(vma, has_fenced_gpu_access))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
- if (copy_to_user(&user_relocs[j].presumed_offset,
- &invalid_offset,
- sizeof(invalid_offset))) {
+ if (__copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+ struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+ return vma->obj;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+ batch_obj = eb_get_batch(eb);
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
+ struct drm_i915_gem_exec_object __user *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+
/* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ break;
+ }
}
}
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ struct drm_i915_gem_exec_object2 *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+ int i;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user\n",
+ args->buffer_count);
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_CACHE_NONE, 0);
+ I915_CACHE_NONE,
+ 0, dev_priv->gtt.base.total,
+ 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, obj,
- (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
- align);
+ ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj->phys_obj->handle->busaddr;
+ addr = obj->phys_handle->busaddr;
}
if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
- if (INTEL_INFO(dev)->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != obj)
- i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
- } else
+ if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
- ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS,
- PAGE_SIZE);
+ ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
- overlay->reg_bo->phys_obj->handle->vaddr;
+ overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
else
error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
}
if (outp == 8)
- return false;
+ return conf;
data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
case 0x00: return 2;
case 0x19: return 1;
case 0x1c: return 0;
+ case 0x1e: return 2;
default:
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 68528619834a..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,6 +1642,7 @@ struct radeon_vce {
unsigned fb_version;
atomic_t handles[RADEON_MAX_VCE_HANDLES];
struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
+ unsigned img_size[RADEON_MAX_VCE_HANDLES];
struct delayed_work idle_work;
};
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
int radeon_vce_cs_parse(struct radeon_cs_parser *p);
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+ (rdev->family == CHIP_MULLINS))
#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
(rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
}
+ if (!found) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
if (!found)
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+ "for command submission\n");
+ return -EINVAL;
+ }
+
p->relocs[i].domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("Ring %d requires VM!\n", p->ring);
- return -EINVAL;
+ if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+ if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
+ return -EINVAL;
+ }
+ } else {
+ if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+ DRM_ERROR("VM not supported on ring %d!\n",
+ p->ring);
+ return -EINVAL;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
radeon_restore_bios_scratch_regs(rdev);
- if (fbcon) {
- radeon_fbdev_set_suspend(rdev, 0);
- console_unlock();
- }
-
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ if (fbcon) {
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 408b6ac53f0b..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = min(128 / post_div, ref_div_max);
+ ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
/* avoid high jitter with small fractional dividers */
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
- fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+ fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
if (fb_div < fb_div_min) {
unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
fb_div *= tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;
}
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
- }
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
- /* map the ib pool buffer read only into
- * virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
- RADEON_VM_PAGE_READABLE |
- RADEON_VM_PAGE_SNOOPED);
+ /* map the ib pool buffer read only into
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
}
-
file_priv->driver_priv = fpriv;
}
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct radeon_bo_va *bo_va;
int r;
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (!r) {
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ }
}
radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+ if ((lobj->alt_domain & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) > rdev->mc.visible_vram_size) {
- /* hurrah the memory is not visible ! */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
- if (unlikely(r != 0))
- return r;
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should not happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
- }
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return 0;
+
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) <= rdev->mc.visible_vram_size)
+ return 0;
+
+ /* hurrah the memory is not visible! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ if (unlikely(r == -ENOMEM)) {
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(bo, &rbo->placement, false, false);
+ } else if (unlikely(r != 0)) {
+ return r;
}
+
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f30b8426eee2..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set profile when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set method when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ count = -EINVAL;
+ goto fail;
+ }
+
/* we don't support the legacy modes with dpm */
if (rdev->pm.pm_method == PM_METHOD_DPM) {
count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set dpm state when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
enum radeon_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
+ struct drm_device *ddev = rdev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (rdev->asic->pm.get_temperature)
temp = radeon_get_temperature(rdev);
else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *ddev = rdev->ddev;
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (rdev->asic->dpm.debugfs_print_current_performance_level)
radeon_dpm_debugfs_print_current_performance_level(rdev, m);
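The same PowerXpress guard now fronts five sysfs/hwmon handlers in radeon_pm.c: on PX laptops vga_switcheroo can cut power to the discrete GPU entirely, and touching its registers in that state returns garbage at best. The repeated condition could be folded into a helper; a hypothetical sketch (this helper is not part of the patch):

	static bool radeon_px_asic_is_off(struct radeon_device *rdev,
					  struct drm_device *ddev)
	{
		/* true when runtime PM has powered down the dGPU */
		return (rdev->flags & RADEON_IS_PX) &&
		       (ddev->switch_power_state != DRM_SWITCH_POWER_ON);
	}

Each store handler would then open with "if (radeon_px_asic_is_off(rdev, ddev)) return -EINVAL;", and the show handlers would report "off".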
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f73324c81491..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
* @p: parser context
* @lo: address of lower dword
* @hi: address of higher dword
+ * @size: minimum size of the relocation buffer
*
* Patch relocation inside command stream with real buffer address
*/
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+ unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- uint64_t offset;
+ struct radeon_cs_reloc *reloc;
+ uint64_t start, end, offset;
unsigned idx;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
return -EINVAL;
}
- offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
- p->ib.ptr[lo] = offset & 0xFFFFFFFF;
- p->ib.ptr[hi] = offset >> 32;
+ p->ib.ptr[lo] = start & 0xFFFFFFFF;
+ p->ib.ptr[hi] = start >> 32;
+
+ if (end <= start) {
+ DRM_ERROR("invalid reloc offset %llX!\n", offset);
+ return -EINVAL;
+ }
+ if ((end - start) < size) {
+ DRM_ERROR("buffer to small (%d / %d)!\n",
+ (unsigned)(end - start), size);
+ return -EINVAL;
+ }
return 0;
}
/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+ unsigned i;
+
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ return i;
+ }
+
+ /* handle not found try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+ p->rdev->vce.filp[i] = p->filp;
+ p->rdev->vce.img_size[i] = 0;
+ return i;
+ }
+ }
+
+ DRM_ERROR("No more free VCE handles!\n");
+ return -EINVAL;
+}
+
+/**
* radeon_vce_cs_parse - parse and validate the command stream
*
* @p: parser context
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
*/
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
- uint32_t handle = 0;
- bool destroy = false;
+ int session_idx = -1;
+ bool destroyed = false;
+ uint32_t tmp, handle = 0;
+ uint32_t *size = &tmp;
int i, r;
while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (destroyed) {
+ DRM_ERROR("No other command allowed after destroy!\n");
+ return -EINVAL;
+ }
+
switch (cmd) {
case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2);
+ session_idx = radeon_vce_validate_handle(p, handle);
+ if (session_idx < 0)
+ return session_idx;
+ size = &p->rdev->vce.img_size[session_idx];
break;
case 0x00000002: // task info
+ break;
+
case 0x01000001: // create
+ *size = radeon_get_ib_value(p, p->idx + 8) *
+ radeon_get_ib_value(p, p->idx + 10) *
+ 8 * 3 / 2;
+ break;
+
case 0x04000001: // config extension
case 0x04000002: // pic control
case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break;
case 0x03000001: // encode
- r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+ r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+ *size);
if (r)
return r;
- r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+ r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+ *size / 3);
if (r)
return r;
break;
case 0x02000001: // destroy
- destroy = true;
+ destroyed = true;
break;
case 0x05000001: // context buffer
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ *size * 2);
+ if (r)
+ return r;
+ break;
+
case 0x05000004: // video bitstream buffer
+ tmp = radeon_get_ib_value(p, p->idx + 4);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ tmp);
+ if (r)
+ return r;
+ break;
+
case 0x05000005: // feedback buffer
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ 4096);
if (r)
return r;
break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ return -EINVAL;
+ }
+
p->idx += len / 4;
}
- if (destroy) {
+ if (destroyed) {
/* IB contains a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
- return 0;
- }
-
- /* create or encode, validate the handle */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
- return 0;
}
- /* handle not found try to alloc a new one */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
- p->rdev->vce.filp[i] = p->filp;
- return 0;
- }
- }
-
- DRM_ERROR("No more free VCE handles!\n");
- return -EINVAL;
+ return 0;
}
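The size bookkeeping above is easier to follow with concrete numbers. An
illustrative calculation (the resolution is hypothetical, not from the patch):

	/* a create command for a 1280x720 session stores */
	*size = 1280 * 720 * 8 * 3 / 2;	/* = 11059200 */
	/* a later encode command then needs >= *size bytes for the luma
	 * reloc and >= *size / 3 (3686400) for chroma; the context buffer
	 * is checked against *size * 2 and the feedback buffer against a
	 * fixed 4096 bytes */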
/**
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct list_head *head)
{
struct radeon_cs_reloc *list;
- unsigned i, idx, size;
+ unsigned i, idx;
- size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
- list = kmalloc(size, GFP_KERNEL);
+ list = kmalloc_array(vm->max_pde_used + 1,
+ sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (!list)
return NULL;
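The kmalloc_array() conversion above is the idiomatic, overflow-safe way to
allocate an array; a minimal sketch of the equivalence (assuming the product
does not overflow):

	list = kmalloc_array(n, sizeof(*list), GFP_KERNEL);
	/* behaves like kmalloc(n * sizeof(*list), GFP_KERNEL), except that
	 * it returns NULL instead of wrapping when n * sizeof(*list)
	 * overflows */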
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 12;
+ ndw += vm->max_pde_used * 16;
/* update too big for an IB */
if (ndw > 0xfffff)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 8)
-# define SPLL_REFCLK_SEL_MASK 0xFF00
+# define SPLL_REFCLK_SEL(x) ((x) << 26)
+# define SPLL_REFCLK_SEL_MASK (3 << 26)
#define CG_SPLL_SPREAD_SPECTRUM 0x620
#define SSEN (1 << 0)
diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig
index d94e38dd80c7..2c76de438eb1 100644
--- a/drivers/hsi/Kconfig
+++ b/drivers/hsi/Kconfig
@@ -14,6 +14,7 @@ config HSI_BOARDINFO
bool
default y
+source "drivers/hsi/controllers/Kconfig"
source "drivers/hsi/clients/Kconfig"
endif # HSI
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
index 9d5d33f90de2..360371e134f1 100644
--- a/drivers/hsi/Makefile
+++ b/drivers/hsi/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o
obj-$(CONFIG_HSI) += hsi.o
+obj-y += controllers/
obj-y += clients/
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
index 3bacd275f479..71b9f9ab86e4 100644
--- a/drivers/hsi/clients/Kconfig
+++ b/drivers/hsi/clients/Kconfig
@@ -4,6 +4,23 @@
comment "HSI clients"
+config NOKIA_MODEM
+ tristate "Nokia Modem"
+ depends on HSI && SSI_PROTOCOL
+ help
+ Say Y here if you want to add support for the modem on Nokia
+ N900 (Nokia RX-51) hardware.
+
+ If unsure, say N.
+
+config SSI_PROTOCOL
+ tristate "SSI protocol"
+ depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m)
+ help
+ If you say Y here, you will enable the SSI protocol aka McSAAB.
+
+ If unsure, say N.
+
config HSI_CHAR
tristate "HSI/SSI character driver"
depends on HSI
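Taken together, the dependencies declared above correspond to a configuration
like this illustrative .config fragment (not part of the patch):

	CONFIG_HSI=y
	CONFIG_PHONET=y
	CONFIG_OMAP_SSI=y
	CONFIG_SSI_PROTOCOL=y
	CONFIG_NOKIA_MODEM=m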
diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile
index 327c0e27c8b0..4d5bc0e0b27b 100644
--- a/drivers/hsi/clients/Makefile
+++ b/drivers/hsi/clients/Makefile
@@ -2,4 +2,6 @@
# Makefile for HSI clients
#
-obj-$(CONFIG_HSI_CHAR) += hsi_char.o
+obj-$(CONFIG_NOKIA_MODEM) += nokia-modem.o
+obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o
+obj-$(CONFIG_HSI_CHAR) += hsi_char.o
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index e61e5f991aa5..57f70c28fa38 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -367,7 +367,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
return -EINVAL;
tmp = cl->rx_cfg;
cl->rx_cfg.mode = rxc->mode;
- cl->rx_cfg.channels = rxc->channels;
+ cl->rx_cfg.num_hw_channels = rxc->channels;
cl->rx_cfg.flow = rxc->flow;
ret = hsi_setup(cl);
if (ret < 0) {
@@ -383,7 +383,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
{
rxc->mode = cl->rx_cfg.mode;
- rxc->channels = cl->rx_cfg.channels;
+ rxc->channels = cl->rx_cfg.num_hw_channels;
rxc->flow = cl->rx_cfg.flow;
}
@@ -402,7 +402,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
return -EINVAL;
tmp = cl->tx_cfg;
cl->tx_cfg.mode = txc->mode;
- cl->tx_cfg.channels = txc->channels;
+ cl->tx_cfg.num_hw_channels = txc->channels;
cl->tx_cfg.speed = txc->speed;
cl->tx_cfg.arb_mode = txc->arb_mode;
ret = hsi_setup(cl);
@@ -417,7 +417,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
{
txc->mode = cl->tx_cfg.mode;
- txc->channels = cl->tx_cfg.channels;
+ txc->channels = cl->tx_cfg.num_hw_channels;
txc->speed = cl->tx_cfg.speed;
txc->arb_mode = cl->tx_cfg.arb_mode;
}
@@ -435,7 +435,7 @@ static ssize_t hsc_read(struct file *file, char __user *buf, size_t len,
return -EINVAL;
if (len > max_data_size)
len = max_data_size;
- if (channel->ch >= channel->cl->rx_cfg.channels)
+ if (channel->ch >= channel->cl->rx_cfg.num_hw_channels)
return -ECHRNG;
if (test_and_set_bit(HSC_CH_READ, &channel->flags))
return -EBUSY;
@@ -492,7 +492,7 @@ static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len,
return -EINVAL;
if (len > max_data_size)
len = max_data_size;
- if (channel->ch >= channel->cl->tx_cfg.channels)
+ if (channel->ch >= channel->cl->tx_cfg.num_hw_channels)
return -ECHRNG;
if (test_and_set_bit(HSC_CH_WRITE, &channel->flags))
return -EBUSY;
@@ -705,7 +705,7 @@ static int hsc_probe(struct device *dev)
if (!hsc_major) {
ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor,
HSC_DEVS, devname);
- if (ret > 0)
+ if (ret == 0)
hsc_major = MAJOR(hsc_dev);
} else {
hsc_dev = MKDEV(hsc_major, hsc_baseminor);
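The one-character fix above matters because alloc_chrdev_region() returns 0
on success and a negative errno on failure, so the old "ret > 0" test meant
the freshly allocated major number was never stored. A minimal usage sketch
(the device name is hypothetical):

	dev_t devt;
	int ret = alloc_chrdev_region(&devt, 0, 1, "example");

	if (ret == 0)
		pr_info("allocated major %d\n", MAJOR(devt));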
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
new file mode 100644
index 000000000000..363b780dacea
--- /dev/null
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -0,0 +1,285 @@
+/*
+ * nokia-modem.c
+ *
+ * HSI client driver for Nokia N900 modem.
+ *
+ * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/hsi/hsi.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/hsi/ssi_protocol.h>
+
+static unsigned int pm = 1;
+module_param(pm, int, 0400);
+MODULE_PARM_DESC(pm,
+ "Enable power management (0=disabled, 1=userland based [default])");
+
+struct nokia_modem_gpio {
+ struct gpio_desc *gpio;
+ const char *name;
+};
+
+struct nokia_modem_device {
+ struct tasklet_struct nokia_modem_rst_ind_tasklet;
+ int nokia_modem_rst_ind_irq;
+ struct device *device;
+ struct nokia_modem_gpio *gpios;
+ int gpio_amount;
+ struct hsi_client *ssi_protocol;
+};
+
+static void do_nokia_modem_rst_ind_tasklet(unsigned long data)
+{
+ struct nokia_modem_device *modem = (struct nokia_modem_device *)data;
+
+ if (!modem)
+ return;
+
+ dev_info(modem->device, "CMT rst line change detected\n");
+
+ if (modem->ssi_protocol)
+ ssip_reset_event(modem->ssi_protocol);
+}
+
+static irqreturn_t nokia_modem_rst_ind_isr(int irq, void *data)
+{
+ struct nokia_modem_device *modem = (struct nokia_modem_device *)data;
+
+ tasklet_schedule(&modem->nokia_modem_rst_ind_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void nokia_modem_gpio_unexport(struct device *dev)
+{
+ struct nokia_modem_device *modem = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < modem->gpio_amount; i++) {
+ sysfs_remove_link(&dev->kobj, modem->gpios[i].name);
+ gpiod_unexport(modem->gpios[i].gpio);
+ }
+}
+
+static int nokia_modem_gpio_probe(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct nokia_modem_device *modem = dev_get_drvdata(dev);
+ int gpio_count, gpio_name_count, i, err;
+
+ gpio_count = of_gpio_count(np);
+
+ if (gpio_count < 0) {
+ dev_err(dev, "missing gpios: %d\n", gpio_count);
+ return gpio_count;
+ }
+
+ gpio_name_count = of_property_count_strings(np, "gpio-names");
+
+ if (gpio_count != gpio_name_count) {
+ dev_err(dev, "number of gpios does not equal number of gpio names\n");
+ return -EINVAL;
+ }
+
+ modem->gpios = devm_kzalloc(dev, gpio_count *
+ sizeof(struct nokia_modem_gpio), GFP_KERNEL);
+ if (!modem->gpios) {
+ dev_err(dev, "Could not allocate memory for gpios\n");
+ return -ENOMEM;
+ }
+
+ modem->gpio_amount = gpio_count;
+
+ for (i = 0; i < gpio_count; i++) {
+ modem->gpios[i].gpio = devm_gpiod_get_index(dev, NULL, i);
+ if (IS_ERR(modem->gpios[i].gpio)) {
+ dev_err(dev, "Could not get gpio %d\n", i);
+ return PTR_ERR(modem->gpios[i].gpio);
+ }
+
+ err = of_property_read_string_index(np, "gpio-names", i,
+ &(modem->gpios[i].name));
+ if (err) {
+ dev_err(dev, "Could not get gpio name %d\n", i);
+ return err;
+ }
+
+ err = gpiod_direction_output(modem->gpios[i].gpio, 0);
+ if (err)
+ return err;
+
+ err = gpiod_export(modem->gpios[i].gpio, 0);
+ if (err)
+ return err;
+
+ err = gpiod_export_link(dev, modem->gpios[i].name,
+ modem->gpios[i].gpio);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
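nokia_modem_gpio_probe() above expects the modem's device tree node to carry
matching "gpios" and "gpio-names" properties. A hypothetical node (phandles,
GPIO numbers and line names are illustrative only):

	modem: modem {
		compatible = "nokia,n900-modem";
		interrupts-extended = <&gpio3 8 IRQ_TYPE_EDGE_RISING>; /* rst_ind */
		gpios = <&gpio3 6 GPIO_ACTIVE_HIGH>,
			<&gpio3 10 GPIO_ACTIVE_HIGH>;
		gpio-names = "cmt_apeslpx", "cmt_rst_rq";
	};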
+static int nokia_modem_probe(struct device *dev)
+{
+ struct device_node *np;
+ struct nokia_modem_device *modem;
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct hsi_port *port = hsi_get_port(cl);
+ int irq, pflags, err;
+ struct hsi_board_info ssip;
+
+ np = dev->of_node;
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENXIO;
+ }
+
+ modem = devm_kzalloc(dev, sizeof(*modem), GFP_KERNEL);
+ if (!modem) {
+ dev_err(dev, "Could not allocate memory for nokia_modem_device\n");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(dev, modem);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq);
+ return -EINVAL;
+ }
+ modem->nokia_modem_rst_ind_irq = irq;
+ pflags = irq_get_trigger_type(irq);
+
+ tasklet_init(&modem->nokia_modem_rst_ind_tasklet,
+ do_nokia_modem_rst_ind_tasklet, (unsigned long)modem);
+ err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr,
+ IRQF_DISABLED | pflags, "modem_rst_ind", modem);
+ if (err < 0) {
+ dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n",
+ irq, pflags);
+ return err;
+ }
+ enable_irq_wake(irq);
+
+ if (pm) {
+ err = nokia_modem_gpio_probe(dev);
+ if (err < 0) {
+ dev_err(dev, "Could not probe GPIOs\n");
+ goto error1;
+ }
+ }
+
+ ssip.name = "ssi-protocol";
+ ssip.tx_cfg = cl->tx_cfg;
+ ssip.rx_cfg = cl->rx_cfg;
+ ssip.platform_data = NULL;
+ ssip.archdata = NULL;
+
+ modem->ssi_protocol = hsi_new_client(port, &ssip);
+ if (!modem->ssi_protocol) {
+ dev_err(dev, "Could not register ssi-protocol device\n");
+ goto error2;
+ }
+
+ err = device_attach(&modem->ssi_protocol->device);
+ if (err == 0) {
+ dev_err(dev, "Missing ssi-protocol driver\n");
+ err = -EPROBE_DEFER;
+ goto error3;
+ } else if (err < 0) {
+ dev_err(dev, "Could not load ssi-protocol driver (%d)\n", err);
+ goto error3;
+ }
+
+ /* TODO: register cmt-speech hsi client */
+
+ dev_info(dev, "Registered Nokia HSI modem\n");
+
+ return 0;
+
+error3:
+ hsi_remove_client(&modem->ssi_protocol->device, NULL);
+error2:
+ nokia_modem_gpio_unexport(dev);
+error1:
+ disable_irq_wake(modem->nokia_modem_rst_ind_irq);
+ tasklet_kill(&modem->nokia_modem_rst_ind_tasklet);
+
+ return err;
+}
+
+static int nokia_modem_remove(struct device *dev)
+{
+ struct nokia_modem_device *modem = dev_get_drvdata(dev);
+
+ if (!modem)
+ return 0;
+
+ if (modem->ssi_protocol) {
+ hsi_remove_client(&modem->ssi_protocol->device, NULL);
+ modem->ssi_protocol = NULL;
+ }
+
+ nokia_modem_gpio_unexport(dev);
+ dev_set_drvdata(dev, NULL);
+ disable_irq_wake(modem->nokia_modem_rst_ind_irq);
+ tasklet_kill(&modem->nokia_modem_rst_ind_tasklet);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id nokia_modem_of_match[] = {
+ { .compatible = "nokia,n900-modem", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nokia_modem_of_match);
+#endif
+
+static struct hsi_client_driver nokia_modem_driver = {
+ .driver = {
+ .name = "nokia-modem",
+ .owner = THIS_MODULE,
+ .probe = nokia_modem_probe,
+ .remove = nokia_modem_remove,
+ .of_match_table = of_match_ptr(nokia_modem_of_match),
+ },
+};
+
+static int __init nokia_modem_init(void)
+{
+ return hsi_register_client_driver(&nokia_modem_driver);
+}
+module_init(nokia_modem_init);
+
+static void __exit nokia_modem_exit(void)
+{
+ hsi_unregister_client_driver(&nokia_modem_driver);
+}
+module_exit(nokia_modem_exit);
+
+MODULE_ALIAS("hsi:nokia-modem");
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_DESCRIPTION("HSI driver module for Nokia N900 Modem");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
new file mode 100644
index 000000000000..ce4be3738d46
--- /dev/null
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -0,0 +1,1191 @@
+/*
+ * ssi_protocol.c
+ *
+ * Implementation of the SSI McSAAB improved protocol.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/hsi/hsi.h>
+#include <linux/hsi/ssi_protocol.h>
+
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
+
+#define SSIP_TXQUEUE_LEN 100
+#define SSIP_MAX_MTU 65535
+#define SSIP_DEFAULT_MTU 4000
+#define PN_MEDIA_SOS 21
+#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */
+#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */
+#define SSIP_KATOUT 15 /* 15 msecs */
+#define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */
+#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
+#define SSIP_CMT_LOADER_SYNC 0x11223344
+/*
+ * SSI protocol command definitions
+ */
+#define SSIP_COMMAND(data) ((data) >> 28)
+#define SSIP_PAYLOAD(data) ((data) & 0xfffffff)
+/* Commands */
+#define SSIP_SW_BREAK 0
+#define SSIP_BOOTINFO_REQ 1
+#define SSIP_BOOTINFO_RESP 2
+#define SSIP_WAKETEST_RESULT 3
+#define SSIP_START_TRANS 4
+#define SSIP_READY 5
+/* Payloads */
+#define SSIP_DATA_VERSION(data) ((data) & 0xff)
+#define SSIP_LOCAL_VERID 1
+#define SSIP_WAKETEST_OK 0
+#define SSIP_WAKETEST_FAILED 1
+#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff)
+#define SSIP_MSG_ID(data) ((data) & 0xff)
+/* Generic Command */
+#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff))
+/* Commands for the control channel */
+#define SSIP_BOOTINFO_REQ_CMD(ver) \
+ SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
+#define SSIP_BOOTINFO_RESP_CMD(ver) \
+ SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
+#define SSIP_START_TRANS_CMD(pdulen, id) \
+ SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
+#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0)
+#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0)
+
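A worked example of the encoding above (the values are illustrative): a
START_TRANS command for a 3-frame PDU with message id 7 is

	SSIP_START_TRANS_CMD(3, 7)
		= SSIP_CMD(SSIP_START_TRANS, (3 << 8) | 7)
		= (4 << 28) | 0x00000307
		= 0x40000307

and the receiver recovers the fields with SSIP_COMMAND() == 4,
SSIP_PDU_LENGTH() == 3 and SSIP_MSG_ID() == 7.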
+/* Main state machine states */
+enum {
+ INIT,
+ HANDSHAKE,
+ ACTIVE,
+};
+
+/* Send state machine states */
+enum {
+ SEND_IDLE,
+ WAIT4READY,
+ SEND_READY,
+ SENDING,
+ SENDING_SWBREAK,
+};
+
+/* Receive state machine states */
+enum {
+ RECV_IDLE,
+ RECV_READY,
+ RECEIVING,
+};
+
+/**
+ * struct ssi_protocol - SSI protocol (McSAAB) data
+ * @main_state: Main state machine
+ * @send_state: TX state machine
+ * @recv_state: RX state machine
+ * @waketest: Flag to follow wake line test
+ * @rxid: RX data id
+ * @txid: TX data id
+ * @txqueue_len: TX queue length
+ * @tx_wd: TX watchdog
+ * @rx_wd: RX watchdog
+ * @keep_alive: Workaround for SSI HW bug
+ * @lock: To serialize access to this struct
+ * @netdev: Phonet network device
+ * @txqueue: TX data queue
+ * @cmdqueue: Queue of free commands
+ * @cl: HSI client own reference
+ * @link: Link for ssip_list
+ * @tx_usecount: Refcount to keep track the slaves that use the wake line
+ * @channel_id_cmd: HSI channel id for command stream
+ * @channel_id_data: HSI channel id for data stream
+ */
+struct ssi_protocol {
+ unsigned int main_state;
+ unsigned int send_state;
+ unsigned int recv_state;
+ unsigned int waketest:1;
+ u8 rxid;
+ u8 txid;
+ unsigned int txqueue_len;
+ struct timer_list tx_wd;
+ struct timer_list rx_wd;
+ struct timer_list keep_alive; /* wake-up workaround */
+ spinlock_t lock;
+ struct net_device *netdev;
+ struct list_head txqueue;
+ struct list_head cmdqueue;
+ struct hsi_client *cl;
+ struct list_head link;
+ atomic_t tx_usecnt;
+ int channel_id_cmd;
+ int channel_id_data;
+};
+
+/* List of ssi protocol instances */
+static LIST_HEAD(ssip_list);
+
+static void ssip_rxcmd_complete(struct hsi_msg *msg);
+
+static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
+{
+ u32 *data;
+
+ data = sg_virt(msg->sgt.sgl);
+ *data = cmd;
+}
+
+static inline u32 ssip_get_cmd(struct hsi_msg *msg)
+{
+ u32 *data;
+
+ data = sg_virt(msg->sgt.sgl);
+
+ return *data;
+}
+
+static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
+{
+ skb_frag_t *frag;
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));
+
+ sg = msg->sgt.sgl;
+ sg_set_buf(sg, skb->data, skb_headlen(skb));
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ sg = sg_next(sg);
+ BUG_ON(!sg);
+ frag = &skb_shinfo(skb)->frags[i];
+ sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+ }
+}
+
+static void ssip_free_data(struct hsi_msg *msg)
+{
+ struct sk_buff *skb;
+
+ skb = msg->context;
+ pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
+ skb);
+ msg->destructor = NULL;
+ dev_kfree_skb(skb);
+ hsi_free_msg(msg);
+}
+
+static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
+ struct sk_buff *skb, gfp_t flags)
+{
+ struct hsi_msg *msg;
+
+ msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
+ if (!msg)
+ return NULL;
+ ssip_skb_to_msg(skb, msg);
+ msg->destructor = ssip_free_data;
+ msg->channel = ssi->channel_id_data;
+ msg->context = skb;
+
+ return msg;
+}
+
+static inline void ssip_release_cmd(struct hsi_msg *msg)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
+
+ dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
+ spin_lock_bh(&ssi->lock);
+ list_add_tail(&msg->link, &ssi->cmdqueue);
+ spin_unlock_bh(&ssi->lock);
+}
+
+static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
+{
+ struct hsi_msg *msg;
+
+ BUG_ON(list_empty(&ssi->cmdqueue));
+
+ spin_lock_bh(&ssi->lock);
+ msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
+ list_del(&msg->link);
+ spin_unlock_bh(&ssi->lock);
+ msg->destructor = ssip_release_cmd;
+
+ return msg;
+}
+
+static void ssip_free_cmds(struct ssi_protocol *ssi)
+{
+ struct hsi_msg *msg, *tmp;
+
+ list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
+ list_del(&msg->link);
+ msg->destructor = NULL;
+ kfree(sg_virt(msg->sgt.sgl));
+ hsi_free_msg(msg);
+ }
+}
+
+static int ssip_alloc_cmds(struct ssi_protocol *ssi)
+{
+ struct hsi_msg *msg;
+ u32 *buf;
+ unsigned int i;
+
+ for (i = 0; i < SSIP_MAX_CMDS; i++) {
+ msg = hsi_alloc_msg(1, GFP_KERNEL);
+ if (!msg)
+ goto out;
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ hsi_free_msg(msg);
+ goto out;
+ }
+ sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
+ msg->channel = ssi->channel_id_cmd;
+ list_add_tail(&msg->link, &ssi->cmdqueue);
+ }
+
+ return 0;
+out:
+ ssip_free_cmds(ssi);
+
+ return -ENOMEM;
+}
+
+static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
+{
+ ssi->recv_state = state;
+ switch (state) {
+ case RECV_IDLE:
+ del_timer(&ssi->rx_wd);
+ if (ssi->send_state == SEND_IDLE)
+ del_timer(&ssi->keep_alive);
+ break;
+ case RECV_READY:
+ /* CMT speech workaround */
+ if (atomic_read(&ssi->tx_usecnt))
+ break;
+ /* Otherwise fall through */
+ case RECEIVING:
+ mod_timer(&ssi->keep_alive, jiffies +
+ msecs_to_jiffies(SSIP_KATOUT));
+ mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
+ break;
+ default:
+ break;
+ }
+}
+
+static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
+{
+ ssi->send_state = state;
+ switch (state) {
+ case SEND_IDLE:
+ case SEND_READY:
+ del_timer(&ssi->tx_wd);
+ if (ssi->recv_state == RECV_IDLE)
+ del_timer(&ssi->keep_alive);
+ break;
+ case WAIT4READY:
+ case SENDING:
+ case SENDING_SWBREAK:
+ mod_timer(&ssi->keep_alive,
+ jiffies + msecs_to_jiffies(SSIP_KATOUT));
+ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
+ break;
+ default:
+ break;
+ }
+}
+
+struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
+{
+ struct hsi_client *master = ERR_PTR(-ENODEV);
+ struct ssi_protocol *ssi;
+
+ list_for_each_entry(ssi, &ssip_list, link)
+ if (slave->device.parent == ssi->cl->device.parent) {
+ master = ssi->cl;
+ break;
+ }
+
+ return master;
+}
+EXPORT_SYMBOL_GPL(ssip_slave_get_master);
+
+int ssip_slave_start_tx(struct hsi_client *master)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(master);
+
+ dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
+ spin_lock_bh(&ssi->lock);
+ if (ssi->send_state == SEND_IDLE) {
+ ssip_set_txstate(ssi, WAIT4READY);
+ hsi_start_tx(master);
+ }
+ spin_unlock_bh(&ssi->lock);
+ atomic_inc(&ssi->tx_usecnt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
+
+int ssip_slave_stop_tx(struct hsi_client *master)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(master);
+
+ WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);
+
+ if (atomic_dec_and_test(&ssi->tx_usecnt)) {
+ spin_lock_bh(&ssi->lock);
+ if ((ssi->send_state == SEND_READY) ||
+ (ssi->send_state == WAIT4READY)) {
+ ssip_set_txstate(ssi, SEND_IDLE);
+ hsi_stop_tx(master);
+ }
+ spin_unlock_bh(&ssi->lock);
+ }
+ dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
+
+int ssip_slave_running(struct hsi_client *master)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(master);
+ return netif_running(ssi->netdev);
+}
+EXPORT_SYMBOL_GPL(ssip_slave_running);
+
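The ssip_slave_*() helpers exported above form the interface for slave
clients such as the cmt-speech client that nokia-modem.c still carries a
TODO for. A hypothetical caller:

	struct hsi_client *master = ssip_slave_get_master(slave);

	if (!IS_ERR(master) && ssip_slave_running(master)) {
		ssip_slave_start_tx(master);	/* keep the wake line up */
		/* ... exchange data with the modem ... */
		ssip_slave_stop_tx(master);	/* release it when idle */
	}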
+static void ssip_reset(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct list_head *head, *tmp;
+ struct hsi_msg *msg;
+
+ if (netif_running(ssi->netdev))
+ netif_carrier_off(ssi->netdev);
+ hsi_flush(cl);
+ spin_lock_bh(&ssi->lock);
+ if (ssi->send_state != SEND_IDLE)
+ hsi_stop_tx(cl);
+ if (ssi->waketest)
+ ssi_waketest(cl, 0);
+ del_timer(&ssi->rx_wd);
+ del_timer(&ssi->tx_wd);
+ del_timer(&ssi->keep_alive);
+ ssi->main_state = 0;
+ ssi->send_state = 0;
+ ssi->recv_state = 0;
+ ssi->waketest = 0;
+ ssi->rxid = 0;
+ ssi->txid = 0;
+ list_for_each_safe(head, tmp, &ssi->txqueue) {
+ msg = list_entry(head, struct hsi_msg, link);
+ dev_dbg(&cl->device, "Pending TX data\n");
+ list_del(head);
+ ssip_free_data(msg);
+ }
+ ssi->txqueue_len = 0;
+ spin_unlock_bh(&ssi->lock);
+}
+
+static void ssip_dump_state(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ spin_lock_bh(&ssi->lock);
+ dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
+ dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
+ dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
+ dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
+ "Online" : "Offline");
+ dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
+ dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
+ dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);
+
+ list_for_each_entry(msg, &ssi->txqueue, link)
+ dev_err(&cl->device, "pending TX data (%p)\n", msg);
+ spin_unlock_bh(&ssi->lock);
+}
+
+static void ssip_error(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ ssip_dump_state(cl);
+ ssip_reset(cl);
+ msg = ssip_claim_cmd(ssi);
+ msg->complete = ssip_rxcmd_complete;
+ hsi_async_read(cl, msg);
+}
+
+static void ssip_keep_alive(unsigned long data)
+{
+ struct hsi_client *cl = (struct hsi_client *)data;
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
+ ssi->main_state, ssi->recv_state, ssi->send_state);
+
+ spin_lock(&ssi->lock);
+ if (ssi->recv_state == RECV_IDLE)
+ switch (ssi->send_state) {
+ case SEND_READY:
+ if (atomic_read(&ssi->tx_usecnt) == 0)
+ break;
+ /*
+ * Fall through. Workaround for cmt-speech;
+ * in that case we rely on audio timers.
+ */
+ case SEND_IDLE:
+ spin_unlock(&ssi->lock);
+ return;
+ }
+ mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
+ spin_unlock(&ssi->lock);
+}
+
+static void ssip_wd(unsigned long data)
+{
+ struct hsi_client *cl = (struct hsi_client *)data;
+
+ dev_err(&cl->device, "Watchdog trigerred\n");
+ ssip_error(cl);
+}
+
+static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
+ msg = ssip_claim_cmd(ssi);
+ ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
+ msg->complete = ssip_release_cmd;
+ hsi_async_write(cl, msg);
+ dev_dbg(&cl->device, "Issuing RX command\n");
+ msg = ssip_claim_cmd(ssi);
+ msg->complete = ssip_rxcmd_complete;
+ hsi_async_read(cl, msg);
+}
+
+static void ssip_start_rx(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
+ ssi->recv_state);
+ spin_lock(&ssi->lock);
+ /*
+ * We can have two UP events in a row due to a short low-high
+ * transition. Therefore we need to ignore the second UP event.
+ */
+ if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
+ if (ssi->main_state == INIT) {
+ ssi->main_state = HANDSHAKE;
+ spin_unlock(&ssi->lock);
+ ssip_send_bootinfo_req_cmd(cl);
+ } else {
+ spin_unlock(&ssi->lock);
+ }
+ return;
+ }
+ ssip_set_rxstate(ssi, RECV_READY);
+ spin_unlock(&ssi->lock);
+
+ msg = ssip_claim_cmd(ssi);
+ ssip_set_cmd(msg, SSIP_READY_CMD);
+ msg->complete = ssip_release_cmd;
+ dev_dbg(&cl->device, "Send READY\n");
+ hsi_async_write(cl, msg);
+}
+
+static void ssip_stop_rx(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
+ spin_lock(&ssi->lock);
+ if (likely(ssi->main_state == ACTIVE))
+ ssip_set_rxstate(ssi, RECV_IDLE);
+ spin_unlock(&ssi->lock);
+}
+
+static void ssip_free_strans(struct hsi_msg *msg)
+{
+ ssip_free_data(msg->context);
+ ssip_release_cmd(msg);
+}
+
+static void ssip_strans_complete(struct hsi_msg *msg)
+{
+ struct hsi_client *cl = msg->cl;
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *data;
+
+ data = msg->context;
+ ssip_release_cmd(msg);
+ spin_lock(&ssi->lock);
+ ssip_set_txstate(ssi, SENDING);
+ spin_unlock(&ssi->lock);
+ hsi_async_write(cl, data);
+}
+
+static int ssip_xmit(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg, *dmsg;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&ssi->lock);
+ if (list_empty(&ssi->txqueue)) {
+ spin_unlock_bh(&ssi->lock);
+ return 0;
+ }
+ dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
+ list_del(&dmsg->link);
+ ssi->txqueue_len--;
+ spin_unlock_bh(&ssi->lock);
+
+ msg = ssip_claim_cmd(ssi);
+ skb = dmsg->context;
+ msg->context = dmsg;
+ msg->complete = ssip_strans_complete;
+ msg->destructor = ssip_free_strans;
+
+ spin_lock_bh(&ssi->lock);
+ ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
+ ssi->txid));
+ ssi->txid++;
+ ssip_set_txstate(ssi, SENDING);
+ spin_unlock_bh(&ssi->lock);
+
+ dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
+ SSIP_BYTES_TO_FRAMES(skb->len));
+
+ return hsi_async_write(cl, msg);
+}
+
+/* In soft IRQ context */
+static void ssip_pn_rx(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+
+ if (unlikely(!netif_running(dev))) {
+ dev_dbg(&dev->dev, "Drop RX packet\n");
+ dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
+ return;
+ }
+ if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
+ dev_dbg(&dev->dev, "Error drop RX packet\n");
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+
+ /* length field is exchanged in network byte order */
+ ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
+ dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
+ ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));
+
+ skb->protocol = htons(ETH_P_PHONET);
+ skb_reset_mac_header(skb);
+ __skb_pull(skb, 1);
+ netif_rx(skb);
+}
+
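A note on the magic index 2 used for the length fixups in this driver: the
skb starts with a one-byte media field followed by the Phonet header, whose
16-bit length field sits three bytes into that header, so the field lands at
byte offset 1 + 3 = 4, i.e. element 2 of a u16 view of skb->data. A
compile-time sketch of that assumption (a hypothetical check, not in the
patch):

	BUILD_BUG_ON(offsetof(struct phonethdr, pn_length) != 3);
	/* 1 (media byte) + 3 = byte offset 4 = ((u16 *)skb->data)[2] */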
+static void ssip_rx_data_complete(struct hsi_msg *msg)
+{
+ struct hsi_client *cl = msg->cl;
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct sk_buff *skb;
+
+ if (msg->status == HSI_STATUS_ERROR) {
+ dev_err(&cl->device, "RX data error\n");
+ ssip_free_data(msg);
+ ssip_error(cl);
+ return;
+ }
+ del_timer(&ssi->rx_wd); /* FIXME: Revisit */
+ skb = msg->context;
+ ssip_pn_rx(skb);
+ hsi_free_msg(msg);
+}
+
+static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ /* Workaround: Ignore CMT Loader message leftover */
+ if (cmd == SSIP_CMT_LOADER_SYNC)
+ return;
+
+ switch (ssi->main_state) {
+ case ACTIVE:
+ dev_err(&cl->device, "Boot info req on active state\n");
+ ssip_error(cl);
+ /* Fall through */
+ case INIT:
+ spin_lock(&ssi->lock);
+ ssi->main_state = HANDSHAKE;
+ if (!ssi->waketest) {
+ ssi->waketest = 1;
+ ssi_waketest(cl, 1); /* FIXME: To be removed */
+ }
+ /* Start boot handshake watchdog */
+ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
+ spin_unlock(&ssi->lock);
+ dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
+ if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
+ dev_warn(&cl->device, "boot info req verid mismatch\n");
+ msg = ssip_claim_cmd(ssi);
+ ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
+ msg->complete = ssip_release_cmd;
+ hsi_async_write(cl, msg);
+ break;
+ case HANDSHAKE:
+ /* Ignore */
+ break;
+ default:
+ dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
+ break;
+ }
+}
+
+static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
+ dev_warn(&cl->device, "boot info resp verid mismatch\n");
+
+ spin_lock(&ssi->lock);
+ if (ssi->main_state != ACTIVE)
+ /* Use tx_wd as a boot watchdog in non ACTIVE state */
+ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
+ else
+ dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
+ ssi->main_state);
+ spin_unlock(&ssi->lock);
+}
+
+static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ unsigned int wkres = SSIP_PAYLOAD(cmd);
+
+ spin_lock(&ssi->lock);
+ if (ssi->main_state != HANDSHAKE) {
+ dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
+ ssi->main_state);
+ spin_unlock(&ssi->lock);
+ return;
+ }
+ if (ssi->waketest) {
+ ssi->waketest = 0;
+ ssi_waketest(cl, 0); /* FIXME: To be removed */
+ }
+ ssi->main_state = ACTIVE;
+ del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
+ spin_unlock(&ssi->lock);
+
+ dev_notice(&cl->device, "WAKELINES TEST %s\n",
+ wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
+ if (wkres & SSIP_WAKETEST_FAILED) {
+ ssip_error(cl);
+ return;
+ }
+ dev_dbg(&cl->device, "CMT is ONLINE\n");
+ netif_wake_queue(ssi->netdev);
+ netif_carrier_on(ssi->netdev);
+}
+
+static void ssip_rx_ready(struct hsi_client *cl)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ spin_lock(&ssi->lock);
+ if (unlikely(ssi->main_state != ACTIVE)) {
+ dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
+ ssi->send_state, ssi->main_state);
+ spin_unlock(&ssi->lock);
+ return;
+ }
+ if (ssi->send_state != WAIT4READY) {
+ dev_dbg(&cl->device, "Ignore spurious READY command\n");
+ spin_unlock(&ssi->lock);
+ return;
+ }
+ ssip_set_txstate(ssi, SEND_READY);
+ spin_unlock(&ssi->lock);
+ ssip_xmit(cl);
+}
+
+static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct sk_buff *skb;
+ struct hsi_msg *msg;
+ int len = SSIP_PDU_LENGTH(cmd);
+
+ dev_dbg(&cl->device, "RX strans: %d frames\n", len);
+ spin_lock(&ssi->lock);
+ if (unlikely(ssi->main_state != ACTIVE)) {
+ dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
+ ssi->send_state, ssi->main_state);
+ spin_unlock(&ssi->lock);
+ return;
+ }
+ ssip_set_rxstate(ssi, RECEIVING);
+ if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
+ dev_err(&cl->device, "START TRANS id %d expeceted %d\n",
+ SSIP_MSG_ID(cmd), ssi->rxid);
+ spin_unlock(&ssi->lock);
+ goto out1;
+ }
+ ssi->rxid++;
+ spin_unlock(&ssi->lock);
+ skb = netdev_alloc_skb(ssi->netdev, len * 4);
+ if (unlikely(!skb)) {
+ dev_err(&cl->device, "No memory for rx skb\n");
+ goto out1;
+ }
+ skb->dev = ssi->netdev;
+ skb_put(skb, len * 4);
+ msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
+ if (unlikely(!msg)) {
+ dev_err(&cl->device, "No memory for RX data msg\n");
+ goto out2;
+ }
+ msg->complete = ssip_rx_data_complete;
+ hsi_async_read(cl, msg);
+
+ return;
+out2:
+ dev_kfree_skb(skb);
+out1:
+ ssip_error(cl);
+}
+
+static void ssip_rxcmd_complete(struct hsi_msg *msg)
+{
+ struct hsi_client *cl = msg->cl;
+ u32 cmd = ssip_get_cmd(msg);
+ unsigned int cmdid = SSIP_COMMAND(cmd);
+
+ if (msg->status == HSI_STATUS_ERROR) {
+ dev_err(&cl->device, "RX error detected\n");
+ ssip_release_cmd(msg);
+ ssip_error(cl);
+ return;
+ }
+ hsi_async_read(cl, msg);
+ dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
+ switch (cmdid) {
+ case SSIP_SW_BREAK:
+ /* Ignored */
+ break;
+ case SSIP_BOOTINFO_REQ:
+ ssip_rx_bootinforeq(cl, cmd);
+ break;
+ case SSIP_BOOTINFO_RESP:
+ ssip_rx_bootinforesp(cl, cmd);
+ break;
+ case SSIP_WAKETEST_RESULT:
+ ssip_rx_waketest(cl, cmd);
+ break;
+ case SSIP_START_TRANS:
+ ssip_rx_strans(cl, cmd);
+ break;
+ case SSIP_READY:
+ ssip_rx_ready(cl);
+ break;
+ default:
+ dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
+ break;
+ }
+}
+
+static void ssip_swbreak_complete(struct hsi_msg *msg)
+{
+ struct hsi_client *cl = msg->cl;
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ ssip_release_cmd(msg);
+ spin_lock(&ssi->lock);
+ if (list_empty(&ssi->txqueue)) {
+ if (atomic_read(&ssi->tx_usecnt)) {
+ ssip_set_txstate(ssi, SEND_READY);
+ } else {
+ ssip_set_txstate(ssi, SEND_IDLE);
+ hsi_stop_tx(cl);
+ }
+ spin_unlock(&ssi->lock);
+ } else {
+ spin_unlock(&ssi->lock);
+ ssip_xmit(cl);
+ }
+ netif_wake_queue(ssi->netdev);
+}
+
+static void ssip_tx_data_complete(struct hsi_msg *msg)
+{
+ struct hsi_client *cl = msg->cl;
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *cmsg;
+
+ if (msg->status == HSI_STATUS_ERROR) {
+ dev_err(&cl->device, "TX data error\n");
+ ssip_error(cl);
+ goto out;
+ }
+ spin_lock(&ssi->lock);
+ if (list_empty(&ssi->txqueue)) {
+ ssip_set_txstate(ssi, SENDING_SWBREAK);
+ spin_unlock(&ssi->lock);
+ cmsg = ssip_claim_cmd(ssi);
+ ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
+ cmsg->complete = ssip_swbreak_complete;
+ dev_dbg(&cl->device, "Send SWBREAK\n");
+ hsi_async_write(cl, cmsg);
+ } else {
+ spin_unlock(&ssi->lock);
+ ssip_xmit(cl);
+ }
+out:
+ ssip_free_data(msg);
+}
+
+void ssip_port_event(struct hsi_client *cl, unsigned long event)
+{
+ switch (event) {
+ case HSI_EVENT_START_RX:
+ ssip_start_rx(cl);
+ break;
+ case HSI_EVENT_STOP_RX:
+ ssip_stop_rx(cl);
+ break;
+ default:
+ return;
+ }
+}
+
+static int ssip_pn_open(struct net_device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ int err;
+
+ err = hsi_claim_port(cl, 1);
+ if (err < 0) {
+ dev_err(&cl->device, "SSI port already claimed\n");
+ return err;
+ }
+ err = hsi_register_port_event(cl, ssip_port_event);
+ if (err < 0) {
+ dev_err(&cl->device, "Register HSI port event failed (%d)\n",
+ err);
+ return err;
+ }
+ dev_dbg(&cl->device, "Configuring SSI port\n");
+ hsi_setup(cl);
+ spin_lock_bh(&ssi->lock);
+ if (!ssi->waketest) {
+ ssi->waketest = 1;
+ ssi_waketest(cl, 1); /* FIXME: To be removed */
+ }
+ ssi->main_state = INIT;
+ spin_unlock_bh(&ssi->lock);
+
+ return 0;
+}
+
+static int ssip_pn_stop(struct net_device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
+
+ ssip_reset(cl);
+ hsi_unregister_port_event(cl);
+ hsi_release_port(cl);
+
+ return 0;
+}
+
+static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+
+ if ((skb->protocol != htons(ETH_P_PHONET)) ||
+ (skb->len < SSIP_MIN_PN_HDR))
+ goto drop;
+ /* Pad to 32 bits - FIXME: Revisit */
+ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
+ goto drop;
+
+ /*
+ * Modem sends Phonet messages over SSI with its own endianness...
+ * Assume that the modem has the same endianness as we do.
+ */
+ if (skb_cow_head(skb, 0))
+ goto drop;
+
+ /* length field is exchanged in network byte order */
+ ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);
+
+ msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
+ if (!msg) {
+ dev_dbg(&cl->device, "Dropping tx data: No memory\n");
+ goto drop;
+ }
+ msg->complete = ssip_tx_data_complete;
+
+ spin_lock_bh(&ssi->lock);
+ if (unlikely(ssi->main_state != ACTIVE)) {
+ spin_unlock_bh(&ssi->lock);
+ dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
+ goto drop2;
+ }
+ list_add_tail(&msg->link, &ssi->txqueue);
+ ssi->txqueue_len++;
+ if (dev->tx_queue_len < ssi->txqueue_len) {
+ dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
+ netif_stop_queue(dev);
+ }
+ if (ssi->send_state == SEND_IDLE) {
+ ssip_set_txstate(ssi, WAIT4READY);
+ spin_unlock_bh(&ssi->lock);
+ dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
+ hsi_start_tx(cl);
+ } else if (ssi->send_state == SEND_READY) {
+ /* Needed for cmt-speech workaround */
+ dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
+ ssi->txqueue_len);
+ spin_unlock_bh(&ssi->lock);
+ ssip_xmit(cl);
+ } else {
+ spin_unlock_bh(&ssi->lock);
+ }
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return 0;
+drop2:
+ hsi_free_msg(msg);
+drop:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/* CMT reset event handler */
+void ssip_reset_event(struct hsi_client *master)
+{
+ struct ssi_protocol *ssi = hsi_client_drvdata(master);
+ dev_err(&ssi->cl->device, "CMT reset detected!\n");
+ ssip_error(ssi->cl);
+}
+EXPORT_SYMBOL_GPL(ssip_reset_event);
+
+static const struct net_device_ops ssip_pn_ops = {
+ .ndo_open = ssip_pn_open,
+ .ndo_stop = ssip_pn_stop,
+ .ndo_start_xmit = ssip_pn_xmit,
+ .ndo_change_mtu = ssip_pn_set_mtu,
+};
+
+static void ssip_pn_setup(struct net_device *dev)
+{
+ dev->features = 0;
+ dev->netdev_ops = &ssip_pn_ops;
+ dev->type = ARPHRD_PHONET;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = SSIP_DEFAULT_MTU;
+ dev->hard_header_len = 1;
+ dev->dev_addr[0] = PN_MEDIA_SOS;
+ dev->addr_len = 1;
+ dev->tx_queue_len = SSIP_TXQUEUE_LEN;
+
+ dev->destructor = free_netdev;
+ dev->header_ops = &phonet_header_ops;
+}
+
+static int ssi_protocol_probe(struct device *dev)
+{
+ static const char ifname[] = "phonet%d";
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct ssi_protocol *ssi;
+ int err;
+
+ ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
+ if (!ssi) {
+ dev_err(dev, "No memory for ssi protocol\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ssi->lock);
+ init_timer_deferrable(&ssi->rx_wd);
+ init_timer_deferrable(&ssi->tx_wd);
+ init_timer(&ssi->keep_alive);
+ ssi->rx_wd.data = (unsigned long)cl;
+ ssi->rx_wd.function = ssip_wd;
+ ssi->tx_wd.data = (unsigned long)cl;
+ ssi->tx_wd.function = ssip_wd;
+ ssi->keep_alive.data = (unsigned long)cl;
+ ssi->keep_alive.function = ssip_keep_alive;
+ INIT_LIST_HEAD(&ssi->txqueue);
+ INIT_LIST_HEAD(&ssi->cmdqueue);
+ atomic_set(&ssi->tx_usecnt, 0);
+ hsi_client_set_drvdata(cl, ssi);
+ ssi->cl = cl;
+
+ ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
+ if (ssi->channel_id_cmd < 0) {
+ err = ssi->channel_id_cmd;
+ dev_err(dev, "Could not get cmd channel (%d)\n", err);
+ goto out;
+ }
+
+ ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
+ if (ssi->channel_id_data < 0) {
+ err = ssi->channel_id_data;
+ dev_err(dev, "Could not get data channel (%d)\n", err);
+ goto out;
+ }
+
+ err = ssip_alloc_cmds(ssi);
+ if (err < 0) {
+ dev_err(dev, "No memory for commands\n");
+ goto out;
+ }
+
+ ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup);
+ if (!ssi->netdev) {
+ dev_err(dev, "No memory for netdev\n");
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ SET_NETDEV_DEV(ssi->netdev, dev);
+ netif_carrier_off(ssi->netdev);
+ err = register_netdev(ssi->netdev);
+ if (err < 0) {
+ dev_err(dev, "Register netdev failed (%d)\n", err);
+ goto out2;
+ }
+
+ list_add(&ssi->link, &ssip_list);
+
+ dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
+ ssi->channel_id_cmd, ssi->channel_id_data);
+
+ return 0;
+out2:
+ free_netdev(ssi->netdev);
+out1:
+ ssip_free_cmds(ssi);
+out:
+ kfree(ssi);
+
+ return err;
+}
+
+static int ssi_protocol_remove(struct device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+
+ list_del(&ssi->link);
+ unregister_netdev(ssi->netdev);
+ ssip_free_cmds(ssi);
+ hsi_client_set_drvdata(cl, NULL);
+ kfree(ssi);
+
+ return 0;
+}
+
+static struct hsi_client_driver ssip_driver = {
+ .driver = {
+ .name = "ssi-protocol",
+ .owner = THIS_MODULE,
+ .probe = ssi_protocol_probe,
+ .remove = ssi_protocol_remove,
+ },
+};
+
+static int __init ssip_init(void)
+{
+ pr_info("SSI protocol aka McSAAB added\n");
+
+ return hsi_register_client_driver(&ssip_driver);
+}
+module_init(ssip_init);
+
+static void __exit ssip_exit(void)
+{
+ hsi_unregister_client_driver(&ssip_driver);
+ pr_info("SSI protocol driver removed\n");
+}
+module_exit(ssip_exit);
+
+MODULE_ALIAS("hsi:ssi-protocol");
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
+MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig
new file mode 100644
index 000000000000..6aba27808172
--- /dev/null
+++ b/drivers/hsi/controllers/Kconfig
@@ -0,0 +1,19 @@
+#
+# HSI controllers configuration
+#
+comment "HSI controllers"
+
+config OMAP_SSI
+ tristate "OMAP SSI hardware driver"
+ depends on HSI && OF && (ARCH_OMAP3 || (ARM && COMPILE_TEST))
+ ---help---
+ SSI is a legacy version of HSI. It is usually used to connect
+ an application engine with a cellular modem.
+ If you say Y here, you will enable the OMAP SSI hardware driver.
+
+ If unsure, say N.
+
+config OMAP_SSI_PORT
+ tristate
+ default m if OMAP_SSI=m
+ default y if OMAP_SSI=y
diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile
new file mode 100644
index 000000000000..d2665cf9c545
--- /dev/null
+++ b/drivers/hsi/controllers/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI controllers drivers
+#
+
+obj-$(CONFIG_OMAP_SSI) += omap_ssi.o
+obj-$(CONFIG_OMAP_SSI_PORT) += omap_ssi_port.o
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
new file mode 100644
index 000000000000..0fc7a7fd0140
--- /dev/null
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -0,0 +1,625 @@
+/* OMAP SSI driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_platform.h>
+#include <linux/hsi/hsi.h>
+#include <linux/idr.h>
+
+#include "omap_ssi_regs.h"
+#include "omap_ssi.h"
+
+/* For automatically allocated device IDs */
+static DEFINE_IDA(platform_omap_ssi_ida);
+
+#ifdef CONFIG_DEBUG_FS
+static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_controller *ssi = m->private;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+
+ pm_runtime_get_sync(ssi->device.parent);
+ seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG));
+ seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG));
+ seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG));
+ pm_runtime_put_sync(ssi->device.parent);
+
+ return 0;
+}
+
+static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_controller *ssi = m->private;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *gdd = omap_ssi->gdd;
+ void __iomem *sys = omap_ssi->sys;
+ int lch;
+
+ pm_runtime_get_sync(ssi->device.parent);
+
+ seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
+ readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG));
+ seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
+ readl(sys + SSI_GDD_MPU_IRQ_ENABLE_REG));
+ seq_printf(m, "HW_ID\t\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_HW_ID_REG));
+ seq_printf(m, "PPORT_ID\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_PPORT_ID_REG));
+ seq_printf(m, "MPORT_ID\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_MPORT_ID_REG));
+ seq_printf(m, "TEST\t\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_TEST_REG));
+ seq_printf(m, "GCR\t\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_GCR_REG));
+
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
+ seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
+ seq_printf(m, "CSDP\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CSDP_REG(lch)));
+ seq_printf(m, "CCR\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CCR_REG(lch)));
+ seq_printf(m, "CICR\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CICR_REG(lch)));
+ seq_printf(m, "CSR\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CSR_REG(lch)));
+ seq_printf(m, "CSSA\t\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_CSSA_REG(lch)));
+ seq_printf(m, "CDSA\t\t: 0x%08x\n",
+ readl(gdd + SSI_GDD_CDSA_REG(lch)));
+ seq_printf(m, "CEN\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CEN_REG(lch)));
+ seq_printf(m, "CSAC\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CSAC_REG(lch)));
+ seq_printf(m, "CDAC\t\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CDAC_REG(lch)));
+ seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
+ readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch)));
+ }
+
+ pm_runtime_put_sync(ssi->device.parent);
+
+ return 0;
+}
+
+static int ssi_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_show, inode->i_private);
+}
+
+static int ssi_gdd_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_gdd_show, inode->i_private);
+}
+
+static const struct file_operations ssi_regs_fops = {
+ .open = ssi_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations ssi_gdd_regs_fops = {
+ .open = ssi_gdd_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct dentry *dir;
+
+ /* SSI controller */
+ omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
+ if (IS_ERR(omap_ssi->dir))
+ return PTR_ERR(omap_ssi->dir);
+
+ debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
+ &ssi_regs_fops);
+ /* SSI GDD (DMA) */
+ dir = debugfs_create_dir("gdd", omap_ssi->dir);
+ if (IS_ERR(dir))
+ goto rback;
+ debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
+
+ return 0;
+rback:
+ debugfs_remove_recursive(omap_ssi->dir);
+
+ return PTR_ERR(dir);
+}
+
+static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ debugfs_remove_recursive(omap_ssi->dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * FIXME: Horrible HACK needed until we remove the useless wakeline test
+ * in the CMT. To be removed !!!!
+ */
+void ssi_waketest(struct hsi_client *cl, unsigned int enable)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ omap_port->wktest = !!enable;
+ if (omap_port->wktest) {
+ pm_runtime_get_sync(ssi->device.parent);
+ writel_relaxed(SSI_WAKE(0),
+ omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+ } else {
+ writel_relaxed(SSI_WAKE(0),
+ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
+ pm_runtime_put_sync(ssi->device.parent);
+ }
+}
+EXPORT_SYMBOL_GPL(ssi_waketest);
+
+static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg;
+ struct hsi_port *port = to_hsi_port(msg->cl->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ unsigned int dir;
+ u32 csr;
+ u32 val;
+
+ spin_lock(&omap_ssi->lock);
+
+ val = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ val &= ~SSI_GDD_LCH(lch);
+ writel_relaxed(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+
+ if (msg->ttype == HSI_MSG_READ) {
+ dir = DMA_FROM_DEVICE;
+ val = SSI_DATAAVAILABLE(msg->channel);
+ pm_runtime_put_sync(ssi->device.parent);
+ } else {
+ dir = DMA_TO_DEVICE;
+ val = SSI_DATAACCEPT(msg->channel);
+ /* Keep clocks reference for write pio event */
+ }
+ dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir);
+ csr = readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch));
+ omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */
+ dev_dbg(&port->device, "DMA completed ch %d ttype %d\n",
+ msg->channel, msg->ttype);
+ spin_unlock(&omap_ssi->lock);
+ if (csr & SSI_CSR_TOUR) { /* Timeout error */
+ msg->status = HSI_STATUS_ERROR;
+ msg->actual_len = 0;
+ spin_lock(&omap_port->lock);
+ list_del(&msg->link); /* Dequeue msg */
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ return;
+ }
+ spin_lock(&omap_port->lock);
+ val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->actual_len = sg_dma_len(msg->sgt.sgl);
+}
+
+static void ssi_gdd_tasklet(unsigned long dev)
+{
+ struct hsi_controller *ssi = (struct hsi_controller *)dev;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+ unsigned int lch;
+ u32 status_reg;
+
+ pm_runtime_get_sync(ssi->device.parent);
+
+ status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
+ if (status_reg & SSI_GDD_LCH(lch))
+ ssi_gdd_complete(ssi, lch);
+ }
+ writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+
+ pm_runtime_put_sync(ssi->device.parent);
+
+ if (status_reg)
+ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
+ else
+ enable_irq(omap_ssi->gdd_irq);
+
+}
+
+static irqreturn_t ssi_gdd_isr(int irq, void *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ unsigned long rate = clk_get_rate(omap_ssi->fck);
+ return rate;
+}
+
+static int __init ssi_get_iomem(struct platform_device *pd,
+ const char *name, void __iomem **pbase, dma_addr_t *phy)
+{
+ struct resource *mem;
+ struct resource *ioarea;
+ void __iomem *base;
+ struct hsi_controller *ssi = platform_get_drvdata(pd);
+
+ mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
+ if (!mem) {
+ dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
+ return -ENXIO;
+ }
+ ioarea = devm_request_mem_region(&ssi->device, mem->start,
+ resource_size(mem), dev_name(&pd->dev));
+ if (!ioarea) {
+ dev_err(&pd->dev, "%s IO memory region request failed\n",
+ mem->name);
+ return -ENXIO;
+ }
+ base = devm_ioremap(&ssi->device, mem->start, resource_size(mem));
+ if (!base) {
+ dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
+ return -ENXIO;
+ }
+ *pbase = base;
+
+ if (phy)
+ *phy = mem->start;
+
+ return 0;
+}
+
+static int __init ssi_add_controller(struct hsi_controller *ssi,
+ struct platform_device *pd)
+{
+ struct omap_ssi_controller *omap_ssi;
+ int err;
+
+ omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL);
+ if (!omap_ssi) {
+ dev_err(&pd->dev, "not enough memory for omap ssi\n");
+ return -ENOMEM;
+ }
+
+ ssi->id = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
+ if (ssi->id < 0) {
+  /* No id was allocated, so skip the out_err ida cleanup */
+  return ssi->id;
+ }
+
+ ssi->owner = THIS_MODULE;
+ ssi->device.parent = &pd->dev;
+ dev_set_name(&ssi->device, "ssi%d", ssi->id);
+ hsi_controller_set_drvdata(ssi, omap_ssi);
+ omap_ssi->dev = &ssi->device;
+ err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL);
+ if (err < 0)
+ goto out_err;
+ err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL);
+ if (err < 0)
+ goto out_err;
+ omap_ssi->gdd_irq = platform_get_irq_byname(pd, "gdd_mpu");
+ if (omap_ssi->gdd_irq < 0) {
+ dev_err(&pd->dev, "GDD IRQ resource missing\n");
+ err = omap_ssi->gdd_irq;
+ goto out_err;
+ }
+ tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
+ (unsigned long)ssi);
+ err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr,
+ 0, "gdd_mpu", ssi);
+ if (err < 0) {
+ dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)",
+ omap_ssi->gdd_irq, err);
+ goto out_err;
+ }
+
+ omap_ssi->port = devm_kzalloc(&ssi->device,
+ sizeof(struct omap_ssi_port *) * ssi->num_ports, GFP_KERNEL);
+ if (!omap_ssi->port) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ omap_ssi->fck = devm_clk_get(&ssi->device, "ssi_ssr_fck");
+ if (IS_ERR(omap_ssi->fck)) {
+ dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n",
+ PTR_ERR(omap_ssi->fck));
+ err = -ENODEV;
+ goto out_err;
+ }
+
+ /* TODO: find a register that can be used to detect context loss */
+ omap_ssi->get_loss = NULL;
+
+ omap_ssi->max_speed = UINT_MAX;
+ spin_lock_init(&omap_ssi->lock);
+ err = hsi_register_controller(ssi);
+
+ if (err < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ida_simple_remove(&platform_omap_ssi_ida, ssi->id);
+ return err;
+}
+
+static int __init ssi_hw_init(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ unsigned int i;
+ u32 val;
+ int err;
+
+ err = pm_runtime_get_sync(ssi->device.parent);
+ if (err < 0) {
+ dev_err(&ssi->device, "runtime PM failed %d\n", err);
+ return err;
+ }
+ /* Reset the SSI controller */
+ writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
+ val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
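+ /* Poll for reset completion, waiting up to 20 * 20 ms */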
+ for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
+ msleep(20);
+ val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+ }
+ if (!(val & SSI_RESETDONE)) {
+ dev_err(&ssi->device, "SSI HW reset failed\n");
+ pm_runtime_put_sync(ssi->device.parent);
+ return -EIO;
+ }
+ /* Reset the GDD */
+ writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
+ /* Get FCK rate in kHz */
+ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000);
+ dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
+ /* Set default PM settings */
+ val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
+ writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
+ omap_ssi->sysconfig = val;
+ writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
+ omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
+ pm_runtime_put_sync(ssi->device.parent);
+
+ return 0;
+}
+
+static void ssi_remove_controller(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int id = ssi->id;
+
+ tasklet_kill(&omap_ssi->gdd_tasklet);
+ hsi_unregister_controller(ssi);
+ ida_simple_remove(&platform_omap_ssi_ida, id);
+}
+
+static inline int ssi_of_get_available_ports_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ if (of_device_is_compatible(child, "ti,omap3-ssi-port"))
+ num++;
+
+ return num;
+}
+
+static int ssi_remove_ports(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ of_device_unregister(pdev);
+
+ return 0;
+}
+
+static int __init ssi_probe(struct platform_device *pd)
+{
+ struct platform_device *childpdev;
+ struct device_node *np = pd->dev.of_node;
+ struct device_node *child;
+ struct hsi_controller *ssi;
+ int err;
+ int num_ports;
+
+ if (!np) {
+ dev_err(&pd->dev, "missing device tree data\n");
+ return -EINVAL;
+ }
+
+ num_ports = ssi_of_get_available_ports_count(np);
+
+ ssi = hsi_alloc_controller(num_ports, GFP_KERNEL);
+ if (!ssi) {
+ dev_err(&pd->dev, "No memory for controller\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pd, ssi);
+
+ err = ssi_add_controller(ssi, pd);
+ if (err < 0)
+ goto out1;
+
+ pm_runtime_irq_safe(&pd->dev);
+ pm_runtime_enable(&pd->dev);
+
+ err = ssi_hw_init(ssi);
+ if (err < 0)
+ goto out2;
+#ifdef CONFIG_DEBUG_FS
+ err = ssi_debug_add_ctrl(ssi);
+ if (err < 0)
+ goto out2;
+#endif
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_device_is_compatible(child, "ti,omap3-ssi-port"))
+ continue;
+
+ childpdev = of_platform_device_create(child, NULL, &pd->dev);
+ if (!childpdev) {
+ err = -ENODEV;
+ dev_err(&pd->dev, "failed to create ssi controller port\n");
+ goto out3;
+ }
+ }
+
+ dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n",
+ ssi->id, num_ports);
+ return 0;
+out3:
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+out2:
+ ssi_remove_controller(ssi);
+out1:
+ platform_set_drvdata(pd, NULL);
+ pm_runtime_disable(&pd->dev);
+
+ return err;
+}
+
+static int __exit ssi_remove(struct platform_device *pd)
+{
+ struct hsi_controller *ssi = platform_get_drvdata(pd);
+
+#ifdef CONFIG_DEBUG_FS
+ ssi_debug_remove_ctrl(ssi);
+#endif
+ ssi_remove_controller(ssi);
+ platform_set_drvdata(pd, NULL);
+
+ pm_runtime_disable(&pd->dev);
+
+ /* Clean up the ports created by of_platform_device_create() */
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int omap_ssi_runtime_suspend(struct device *dev)
+{
+ struct hsi_controller *ssi = dev_get_drvdata(dev);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(dev, "runtime suspend!\n");
+
+ if (omap_ssi->get_loss)
+ omap_ssi->loss_count =
+ omap_ssi->get_loss(ssi->device.parent);
+
+ return 0;
+}
+
+static int omap_ssi_runtime_resume(struct device *dev)
+{
+ struct hsi_controller *ssi = dev_get_drvdata(dev);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(dev, "runtime resume!\n");
+
+ if ((omap_ssi->get_loss) && (omap_ssi->loss_count ==
+ omap_ssi->get_loss(ssi->device.parent)))
+ return 0;
+
+ writel_relaxed(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_ssi_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_ssi_runtime_suspend, omap_ssi_runtime_resume,
+ NULL)
+};
+
+#define DEV_PM_OPS (&omap_ssi_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ssi_of_match[] = {
+ { .compatible = "ti,omap3-ssi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_ssi_of_match);
+#else
+#define omap_ssi_of_match NULL
+#endif
+
+static struct platform_driver ssi_pdriver = {
+ .remove = __exit_p(ssi_remove),
+ .driver = {
+ .name = "omap_ssi",
+ .owner = THIS_MODULE,
+ .pm = DEV_PM_OPS,
+ .of_match_table = omap_ssi_of_match,
+ },
+};
+
+module_platform_driver_probe(ssi_pdriver, ssi_probe);
+
+MODULE_ALIAS("platform:omap_ssi");
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_DESCRIPTION("Synchronous Serial Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h
new file mode 100644
index 000000000000..9d056417d88c
--- /dev/null
+++ b/drivers/hsi/controllers/omap_ssi.h
@@ -0,0 +1,166 @@
+/* OMAP SSI internal interface.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013 Sebastian Reichel
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_OMAP_SSI_H__
+#define __LINUX_HSI_OMAP_SSI_H__
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hsi/hsi.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define SSI_MAX_CHANNELS 8
+#define SSI_MAX_GDD_LCH 8
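+/* Convert a byte count into a number of 32-bit frames, rounding up */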
+#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
+
+/**
+ * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context
+ * @mode: Bit transmission mode
+ * @channels: Number of channels
+ * @frame_size: Frame size in bits
+ * @timeout: RX frame timeout
+ * @divisor: TX divider
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct omap_ssm_ctx {
+ u32 mode;
+ u32 channels;
+ u32 frame_size;
+ union {
+ u32 timeout; /* Rx Only */
+ struct {
+ u32 arb_mode;
+ u32 divisor;
+ }; /* Tx only */
+ };
+};
+
+/**
+ * struct omap_ssi_port - OMAP SSI port data
+ * @dev: device associated to the port (HSI port)
+ * @pdev: platform device associated to the port
+ * @sst_dma: SSI transmitter physical base address
+ * @ssr_dma: SSI receiver physical base address
+ * @sst_base: SSI transmitter base address
+ * @ssr_base: SSI receiver base address
+ * @wk_lock: spin lock to serialize access to the wake lines
+ * @lock: Spin lock to serialize access to the SSI port
+ * @channels: Current number of channels configured (1,2,4 or 8)
+ * @txqueue: TX message queues
+ * @rxqueue: RX message queues
+ * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode)
+ * @irq: IRQ number
+ * @wake_irq: IRQ number for incoming wake line (-1 if none)
+ * @wake_gpio: GPIO number for incoming wake line (-1 if none)
+ * @pio_tasklet: Bottom half for PIO transfers and events
+ * @wake_tasklet: Bottom half for incoming wake events
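+ * @wktest: Wake line test mode flag (FIXME: HACK to be removed)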
+ * @wkin_cken: Keep track of clock references due to the incoming wake line
+ * @wk_refcount: Reference count for output wake line
+ * @sys_mpu_enable: Context for the interrupt enable register for irq 0
+ * @sst: Context for the synchronous serial transmitter
+ * @ssr: Context for the synchronous serial receiver
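+ * @loss_count: Saved context loss count, used to detect lost port context
+ * @port_id: Port number within the controller
+ * @dir: Debugfs port directory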
+ */
+struct omap_ssi_port {
+ struct device *dev;
+ struct device *pdev;
+ dma_addr_t sst_dma;
+ dma_addr_t ssr_dma;
+ void __iomem *sst_base;
+ void __iomem *ssr_base;
+ spinlock_t wk_lock;
+ spinlock_t lock;
+ unsigned int channels;
+ struct list_head txqueue[SSI_MAX_CHANNELS];
+ struct list_head rxqueue[SSI_MAX_CHANNELS];
+ struct list_head brkqueue;
+ int irq;
+ int wake_irq;
+ int wake_gpio;
+ struct tasklet_struct pio_tasklet;
+ struct tasklet_struct wake_tasklet;
+ bool wktest:1; /* FIXME: HACK to be removed */
+ bool wkin_cken:1; /* Workaround */
+ unsigned int wk_refcount;
+ /* OMAP SSI port context */
+ u32 sys_mpu_enable; /* We use only one irq */
+ struct omap_ssm_ctx sst;
+ struct omap_ssm_ctx ssr;
+ u32 loss_count;
+ u32 port_id;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir;
+#endif
+};
+
+/**
+ * struct gdd_trn - GDD transaction data
+ * @msg: Pointer to the HSI message being served
+ * @sg: Pointer to the current sg entry being served
+ */
+struct gdd_trn {
+ struct hsi_msg *msg;
+ struct scatterlist *sg;
+};
+
+/**
+ * struct omap_ssi_controller - OMAP SSI controller data
+ * @dev: device associated to the controller (HSI controller)
+ * @sys: SSI I/O base address
+ * @gdd: GDD I/O base address
+ * @fck: SSI functional clock
+ * @gdd_irq: IRQ line for GDD
+ * @gdd_tasklet: bottom half for DMA transfers
+ * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers
+ * @lock: lock to serialize access to GDD
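+ * @fck_rate: Functional clock rate in kHz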
+ * @loss_count: To follow if we need to restore context or not
+ * @max_speed: Maximum TX speed (Kb/s) set by the clients.
+ * @sysconfig: SSI controller saved context
+ * @gdd_gcr: SSI GDD saved context
+ * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any
+ * @port: Array of pointers of the ports of the controller
+ * @dir: Debugfs SSI root directory
+ */
+struct omap_ssi_controller {
+ struct device *dev;
+ void __iomem *sys;
+ void __iomem *gdd;
+ struct clk *fck;
+ int gdd_irq;
+ struct tasklet_struct gdd_tasklet;
+ struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH];
+ spinlock_t lock;
+ unsigned long fck_rate;
+ u32 loss_count;
+ u32 max_speed;
+ /* OMAP SSI Controller context */
+ u32 sysconfig;
+ u32 gdd_gcr;
+ int (*get_loss)(struct device *dev);
+ struct omap_ssi_port **port;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir;
+#endif
+};
+
+#endif /* __LINUX_HSI_OMAP_SSI_H__ */
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
new file mode 100644
index 000000000000..b8693f0b27fe
--- /dev/null
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -0,0 +1,1399 @@
+/* OMAP SSI port driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/of_gpio.h>
+#include <linux/debugfs.h>
+
+#include "omap_ssi_regs.h"
+#include "omap_ssi.h"
+
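+/* No-op callbacks used to stub out the port operations on removal */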
+static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
+{
+ return 0;
+}
+
+static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
+{
+ return 0;
+}
+
+static inline unsigned int ssi_wakein(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ return gpio_get_value(omap_port->wake_gpio);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void ssi_debug_remove_port(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ debugfs_remove_recursive(omap_port->dir);
+}
+
+static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_port *port = m->private;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *base = omap_ssi->sys;
+ unsigned int ch;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ if (omap_port->wake_irq > 0)
+ seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
+ seq_printf(m, "WAKE\t\t: 0x%08x\n",
+ readl(base + SSI_WAKE_REG(port->num)));
+ seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
+ readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
+ seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
+ readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
+ /* SST */
+ base = omap_port->sst_base;
+ seq_puts(m, "\nSST\n===\n");
+ seq_printf(m, "ID SST\t\t: 0x%08x\n",
+ readl(base + SSI_SST_ID_REG));
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ readl(base + SSI_SST_MODE_REG));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ readl(base + SSI_SST_FRAMESIZE_REG));
+ seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
+ readl(base + SSI_SST_DIVISOR_REG));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ readl(base + SSI_SST_CHANNELS_REG));
+ seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
+ readl(base + SSI_SST_ARBMODE_REG));
+ seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
+ readl(base + SSI_SST_TXSTATE_REG));
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ readl(base + SSI_SST_BUFSTATE_REG));
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ readl(base + SSI_SST_BREAK_REG));
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ readl(base + SSI_SST_BUFFER_CH_REG(ch)));
+ }
+ /* SSR */
+ base = omap_port->ssr_base;
+ seq_puts(m, "\nSSR\n===\n");
+ seq_printf(m, "ID SSR\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_ID_REG));
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_MODE_REG));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ readl(base + SSI_SSR_FRAMESIZE_REG));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ readl(base + SSI_SSR_CHANNELS_REG));
+ seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_TIMEOUT_REG));
+ seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_RXSTATE_REG));
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ readl(base + SSI_SSR_BUFSTATE_REG));
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_BREAK_REG));
+ seq_printf(m, "ERROR\t\t: 0x%08x\n",
+ readl(base + SSI_SSR_ERROR_REG));
+ seq_printf(m, "ERRORACK\t: 0x%08x\n",
+ readl(base + SSI_SSR_ERRORACK_REG));
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
+ }
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return 0;
+}
+
+static int ssi_port_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_port_show, inode->i_private);
+}
+
+static const struct file_operations ssi_port_regs_fops = {
+ .open = ssi_port_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ssi_div_get(void *data, u64 *val)
+{
+ struct hsi_port *port = data;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ pm_runtime_get_sync(omap_port->pdev);
+ *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return 0;
+}
+
+static int ssi_div_set(void *data, u64 val)
+{
+ struct hsi_port *port = data;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ if (val > 127)
+ return -EINVAL;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
+ omap_port->sst.divisor = val;
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
+
+static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
+ struct dentry *dir)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+
+ dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ omap_port->dir = dir;
+ debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
+ dir = debugfs_create_dir("sst", dir);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
+ &ssi_sst_div_fops);
+
+ return 0;
+}
+#endif
+
+static int ssi_claim_lch(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int lch;
+
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
+ if (!omap_ssi->gdd_trn[lch].msg) {
+ omap_ssi->gdd_trn[lch].msg = msg;
+ omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
+ return lch;
+ }
+
+ return -EBUSY;
+}
+
+static int ssi_start_dma(struct hsi_msg *msg, int lch)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *gdd = omap_ssi->gdd;
+ int err;
+ u16 csdp;
+ u16 ccr;
+ u32 s_addr;
+ u32 d_addr;
+ u32 tmp;
+
+ if (msg->ttype == HSI_MSG_READ) {
+ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
+ DMA_FROM_DEVICE);
+ if (err < 0) {
+ dev_dbg(&ssi->device, "DMA map SG failed!\n");
+ return err;
+ }
+ csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
+ SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
+ SSI_DATA_TYPE_S32;
+ ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
+ ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
+ SSI_CCR_ENABLE;
+ s_addr = omap_port->ssr_dma +
+ SSI_SSR_BUFFER_CH_REG(msg->channel);
+ d_addr = sg_dma_address(msg->sgt.sgl);
+ } else {
+ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
+ DMA_TO_DEVICE);
+ if (err < 0) {
+ dev_dbg(&ssi->device, "DMA map SG failed!\n");
+ return err;
+ }
+ csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
+ SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
+ SSI_DATA_TYPE_S32;
+ ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
+ ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
+ SSI_CCR_ENABLE;
+ s_addr = sg_dma_address(msg->sgt.sgl);
+ d_addr = omap_port->sst_dma +
+ SSI_SST_BUFFER_CH_REG(msg->channel);
+ }
+ dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n",
+ lch, csdp, ccr, s_addr, d_addr);
+
+ /* Hold clocks during the transfer */
+ pm_runtime_get_sync(omap_port->pdev);
+
+ writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
+ writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
+ writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
+ writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
+ writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
+ gdd + SSI_GDD_CEN_REG(lch));
+
+ spin_lock_bh(&omap_ssi->lock);
+ tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp |= SSI_GDD_LCH(lch);
+ writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ spin_unlock_bh(&omap_ssi->lock);
+ writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ return 0;
+}
+
+static int ssi_start_pio(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ u32 val;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ if (msg->ttype == HSI_MSG_WRITE) {
+ val = SSI_DATAACCEPT(msg->channel);
+ /* Hold a clock reference for the PIO write; released in ssi_pio_complete() */
+ pm_runtime_get_sync(omap_port->pdev);
+ } else {
+ val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
+ }
+ dev_dbg(&port->device, "Single %s transfer\n",
+ msg->ttype ? "write" : "read");
+ val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ pm_runtime_put_sync(omap_port->pdev);
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ return 0;
+}
+
+static int ssi_start_transfer(struct list_head *queue)
+{
+ struct hsi_msg *msg;
+ int lch = -1;
+
+ if (list_empty(queue))
+ return 0;
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if (msg->status != HSI_STATUS_QUEUED)
+ return 0;
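+ /*
+  * Transfers longer than one word use the GDD DMA engine when a
+  * logical channel is free; everything else falls back to PIO.
+  */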
+ if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
+ lch = ssi_claim_lch(msg);
+ if (lch >= 0)
+ return ssi_start_dma(msg, lch);
+ else
+ return ssi_start_pio(msg);
+}
+
+static int ssi_async_break(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int err = 0;
+ u32 tmp;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ if (msg->ttype == HSI_MSG_WRITE) {
+ if (omap_port->sst.mode != SSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->complete(msg);
+ } else {
+ if (omap_port->ssr.mode != SSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ spin_lock_bh(&omap_port->lock);
+ tmp = readl(omap_ssi->sys +
+ SSI_MPU_ENABLE_REG(port->num, 0));
+ writel(tmp | SSI_BREAKDETECTED,
+ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ msg->status = HSI_STATUS_PROCEEDING;
+ list_add_tail(&msg->link, &omap_port->brkqueue);
+ spin_unlock_bh(&omap_port->lock);
+ }
+out:
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return err;
+}
+
+static int ssi_async(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct list_head *queue;
+ int err = 0;
+
+ BUG_ON(!msg);
+
+ if (msg->sgt.nents > 1)
+ return -ENOSYS; /* TODO: Add sg support */
+
+ if (msg->break_frame)
+ return ssi_async_break(msg);
+
+ if (msg->ttype) {
+ BUG_ON(msg->channel >= omap_port->sst.channels);
+ queue = &omap_port->txqueue[msg->channel];
+ } else {
+ BUG_ON(msg->channel >= omap_port->ssr.channels);
+ queue = &omap_port->rxqueue[msg->channel];
+ }
+ msg->status = HSI_STATUS_QUEUED;
+ spin_lock_bh(&omap_port->lock);
+ list_add_tail(&msg->link, queue);
+ err = ssi_start_transfer(queue);
+ if (err < 0) {
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ }
+ spin_unlock_bh(&omap_port->lock);
+ dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
+ msg->status, msg->ttype, msg->channel);
+
+ return err;
+}
+
+static u32 ssi_calculate_div(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ u32 tx_fckrate = (u32) omap_ssi->fck_rate;
+
+ /* / 2 : SSI TX clock is always half of the SSI functional clock */
+ tx_fckrate >>= 1;
+ /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
+ tx_fckrate--;
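+ /*
+  * E.g. fck_rate = 96000 kHz gives a 48000 kHz TX clock; with
+  * max_speed = 24000 Kb/s this yields (48000 - 1) / 24000 = 1.
+  */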
+ dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
+ tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
+ omap_ssi->max_speed);
+
+ return tx_fckrate / omap_ssi->max_speed;
+}
+
+static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
+{
+ struct list_head *node, *tmp;
+ struct hsi_msg *msg;
+
+ list_for_each_safe(node, tmp, queue) {
+ msg = list_entry(node, struct hsi_msg, link);
+ if ((cl) && (cl != msg->cl))
+ continue;
+ list_del(node);
+ pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
+ msg->channel, msg, msg->sgt.sgl->length,
+ msg->ttype, msg->context);
+ if (msg->destructor)
+ msg->destructor(msg);
+ else
+ hsi_free_msg(msg);
+ }
+}
+
+static int ssi_setup(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sst = omap_port->sst_base;
+ void __iomem *ssr = omap_port->ssr_base;
+ u32 div;
+ u32 val;
+ int err = 0;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ spin_lock_bh(&omap_port->lock);
+ if (cl->tx_cfg.speed)
+ omap_ssi->max_speed = cl->tx_cfg.speed;
+ div = ssi_calculate_div(ssi);
+ if (div > SSI_MAX_DIVISOR) {
+ dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
+ cl->tx_cfg.speed, div);
+ err = -EINVAL;
+ goto out;
+ }
+ /* Set TX/RX module to sleep to stop TX/RX during cfg update */
+ writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
+ writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
+ /* Flush posted write */
+ val = readl(ssr + SSI_SSR_MODE_REG);
+ /* TX */
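+ /* A FRAMESIZE value of 31 selects 32-bit frames (size is encoded minus one) */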
+ writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
+ writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
+ writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
+ writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
+ writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
+ /* RX */
+ writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
+ writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
+ writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
+ /* Cleanup the break queue if we leave FRAME mode */
+ if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
+ (cl->rx_cfg.mode != SSI_MODE_FRAME))
+ ssi_flush_queue(&omap_port->brkqueue, cl);
+ writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
+ omap_port->channels = max(cl->rx_cfg.num_hw_channels,
+ cl->tx_cfg.num_hw_channels);
+ /* Save the shadow register context for OFF mode */
+ /* SST */
+ omap_port->sst.divisor = div;
+ omap_port->sst.frame_size = 31;
+ omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
+ omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
+ omap_port->sst.mode = cl->tx_cfg.mode;
+ /* SSR */
+ omap_port->ssr.frame_size = 31;
+ omap_port->ssr.timeout = 0;
+ omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
+ omap_port->ssr.mode = cl->rx_cfg.mode;
+out:
+ spin_unlock_bh(&omap_port->lock);
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return err;
+}
+
+static int ssi_flush(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ void __iomem *sst = omap_port->sst_base;
+ void __iomem *ssr = omap_port->ssr_base;
+ unsigned int i;
+ u32 err;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ spin_lock_bh(&omap_port->lock);
+ /* Stop all DMA transfers */
+ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if (!msg || (port != hsi_get_port(msg->cl)))
+ continue;
+ writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ if (msg->ttype == HSI_MSG_READ)
+ pm_runtime_put_sync(omap_port->pdev);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ /* Flush all SST buffers */
+ writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
+ writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
+ /* Flush all SSR buffers */
+ writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
+ writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
+ /* Flush all errors */
+ err = readl(ssr + SSI_SSR_ERROR_REG);
+ writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
+ /* Flush break */
+ writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
+ /* Clear interrupts */
+ writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel_relaxed(0xffffff00,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ /* Dequeue all pending requests */
+ for (i = 0; i < omap_port->channels; i++) {
+ /* Release write clocks */
+ if (!list_empty(&omap_port->txqueue[i]))
+ pm_runtime_put_sync(omap_port->pdev);
+ ssi_flush_queue(&omap_port->txqueue[i], NULL);
+ ssi_flush_queue(&omap_port->rxqueue[i], NULL);
+ }
+ ssi_flush_queue(&omap_port->brkqueue, NULL);
+ spin_unlock_bh(&omap_port->lock);
+ pm_runtime_put_sync(omap_port->pdev);
+
+ return 0;
+}
+
+static int ssi_start_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);
+
+ spin_lock_bh(&omap_port->wk_lock);
+ if (omap_port->wk_refcount++) {
+ spin_unlock_bh(&omap_port->wk_lock);
+ return 0;
+ }
+ pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
+ writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+ spin_unlock_bh(&omap_port->wk_lock);
+
+ return 0;
+}
+
+static int ssi_stop_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);
+
+ spin_lock_bh(&omap_port->wk_lock);
+ BUG_ON(!omap_port->wk_refcount);
+ if (--omap_port->wk_refcount) {
+ spin_unlock_bh(&omap_port->wk_lock);
+ return 0;
+ }
+ writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
+ pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
+ spin_unlock_bh(&omap_port->wk_lock);
+
+ return 0;
+}
+
+static void ssi_transfer(struct omap_ssi_port *omap_port,
+ struct list_head *queue)
+{
+ struct hsi_msg *msg;
+ int err = -1;
+
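+ /* On failure, complete the head message with an error and try the next one */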
+ spin_lock_bh(&omap_port->lock);
+ while (err < 0) {
+ err = ssi_start_transfer(queue);
+ if (err < 0) {
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ msg->status = HSI_STATUS_ERROR;
+ msg->actual_len = 0;
+ list_del(&msg->link);
+ spin_unlock_bh(&omap_port->lock);
+ msg->complete(msg);
+ spin_lock_bh(&omap_port->lock);
+ }
+ }
+ spin_unlock_bh(&omap_port->lock);
+}
+
+static void ssi_cleanup_queues(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 rxbufstate = 0;
+ u32 txbufstate = 0;
+ u32 status = SSI_ERROROCCURED;
+ u32 tmp;
+
+ ssi_flush_queue(&omap_port->brkqueue, cl);
+ if (list_empty(&omap_port->brkqueue))
+ status |= SSI_BREAKDETECTED;
+
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->txqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
+ link);
+ if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
+ txbufstate |= (1 << i);
+ status |= SSI_DATAACCEPT(i);
+ /* Release the clock references held for writes, including GDD ones */
+ pm_runtime_put_sync(omap_port->pdev);
+ }
+ ssi_flush_queue(&omap_port->txqueue[i], cl);
+ }
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->rxqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
+ link);
+ if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
+ rxbufstate |= (1 << i);
+ status |= SSI_DATAAVAILABLE(i);
+ }
+ ssi_flush_queue(&omap_port->rxqueue[i], cl);
+ /* Check if we keep the error detection interrupt armed */
+ if (!list_empty(&omap_port->rxqueue[i]))
+ status &= ~SSI_ERROROCCURED;
+ }
+ /* Cleanup write buffers */
+ tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
+ tmp &= ~txbufstate;
+ writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
+ /* Cleanup read buffers */
+ tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
+ tmp &= ~rxbufstate;
+ writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
+ /* Disarm and ack pending interrupts */
+ tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ tmp &= ~status;
+ writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel_relaxed(status, omap_ssi->sys +
+ SSI_MPU_STATUS_REG(port->num, 0));
+}
+
+static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 val = 0;
+ u32 tmp;
+
+ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if ((!msg) || (msg->cl != cl))
+ continue;
+ writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ val |= (1 << i);
+ /*
+ * Clock references for write will be handled in
+ * ssi_cleanup_queues
+ */
+ if (msg->ttype == HSI_MSG_READ)
+ pm_runtime_put_sync(omap_port->pdev);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp &= ~val;
+ writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+}
+
+static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
+{
+ writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
+ writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
+ /* OCP barrier */
+ mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
+
+ return 0;
+}
+
+static int ssi_release(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+
+ spin_lock_bh(&omap_port->lock);
+ pm_runtime_get_sync(omap_port->pdev);
+ /* Stop all the pending DMA requests for that client */
+ ssi_cleanup_gdd(ssi, cl);
+ /* Now cleanup all the queues */
+ ssi_cleanup_queues(cl);
+ pm_runtime_put_sync(omap_port->pdev);
+ /* If it is the last client of the port, do extra checks and cleanup */
+ if (port->claimed <= 1) {
+ /*
+ * Drop the clock reference for the incoming wake line
+ * if it is still kept high by the other side.
+ */
+ if (omap_port->wkin_cken) {
+ pm_runtime_put_sync(omap_port->pdev);
+ omap_port->wkin_cken = 0;
+ }
+ pm_runtime_get_sync(omap_port->pdev);
+ /* Stop any SSI TX/RX without a client */
+ ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
+ omap_port->sst.mode = SSI_MODE_SLEEP;
+ omap_port->ssr.mode = SSI_MODE_SLEEP;
+ pm_runtime_put_sync(omap_port->pdev);
+ WARN_ON(omap_port->wk_refcount != 0);
+ }
+ spin_unlock_bh(&omap_port->lock);
+
+ return 0;
+}
+
+
+static void ssi_error(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 err;
+ u32 val;
+ u32 tmp;
+
+ /* ACK error */
+ err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
+ dev_err(&port->device, "SSI error: 0x%02x\n", err);
+ if (!err) {
+ dev_dbg(&port->device, "spurious SSI error ignored!\n");
+ return;
+ }
+ spin_lock(&omap_ssi->lock);
+ /* Cancel all GDD read transfers */
+ for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if ((msg) && (msg->ttype == HSI_MSG_READ)) {
+ writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ val |= (1 << i);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ }
+ tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp &= ~val;
+ writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ spin_unlock(&omap_ssi->lock);
+ /* Cancel all PIO read transfers */
+ spin_lock(&omap_port->lock);
+ tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
+ writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ /* ACK error */
+ writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
+ writel_relaxed(SSI_ERROROCCURED,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ /* Signal the error to all currently pending read requests */
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->rxqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
+ link);
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ /* Now restart queued reads if any */
+ ssi_transfer(omap_port, &omap_port->rxqueue[i]);
+ spin_lock(&omap_port->lock);
+ }
+ spin_unlock(&omap_port->lock);
+}
+
+static void ssi_break_complete(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ struct hsi_msg *tmp;
+ u32 val;
+
+ dev_dbg(&port->device, "HWBREAK received\n");
+
+ spin_lock(&omap_port->lock);
+ val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ val &= ~SSI_BREAKDETECTED;
+ writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
+ writel(SSI_BREAKDETECTED,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
+ msg->status = HSI_STATUS_COMPLETED;
+ spin_lock(&omap_port->lock);
+ list_del(&msg->link);
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ }
+}
+
+static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
+{
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_msg *msg;
+ u32 *buf;
+ u32 reg;
+ u32 val;
+
+ spin_lock(&omap_port->lock);
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PENDING;
+ }
+ if (msg->ttype == HSI_MSG_WRITE)
+ val = SSI_DATAACCEPT(msg->channel);
+ else
+ val = SSI_DATAAVAILABLE(msg->channel);
+ if (msg->status == HSI_STATUS_PROCEEDING) {
+ buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
+ if (msg->ttype == HSI_MSG_WRITE)
+ writel(*buf, omap_port->sst_base +
+ SSI_SST_BUFFER_CH_REG(msg->channel));
+ else
+ *buf = readl(omap_port->ssr_base +
+ SSI_SSR_BUFFER_CH_REG(msg->channel));
+ dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
+ msg->ttype, *buf);
+ msg->actual_len += sizeof(*buf);
+ if (msg->actual_len >= msg->sgt.sgl->length)
+ msg->status = HSI_STATUS_COMPLETED;
+ /*
+ * Wait for the last written frame to be really sent before
+ * we call the complete callback
+ */
+ if ((msg->status == HSI_STATUS_PROCEEDING) ||
+ ((msg->status == HSI_STATUS_COMPLETED) &&
+ (msg->ttype == HSI_MSG_WRITE))) {
+ writel(val, omap_ssi->sys +
+ SSI_MPU_STATUS_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ return;
+ }
+ }
+ /* Transfer completed at this point */
+ reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ if (msg->ttype == HSI_MSG_WRITE) {
+ /* Release clocks for write transfer */
+ pm_runtime_put_sync(omap_port->pdev);
+ }
+ reg &= ~val;
+ writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ list_del(&msg->link);
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ ssi_transfer(omap_port, queue);
+}
+
+static void ssi_pio_tasklet(unsigned long ssi_port)
+{
+ struct hsi_port *port = (struct hsi_port *)ssi_port;
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+ unsigned int ch;
+ u32 status_reg;
+
+ pm_runtime_get_sync(omap_port->pdev);
+ status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
+
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ if (status_reg & SSI_DATAACCEPT(ch))
+ ssi_pio_complete(port, &omap_port->txqueue[ch]);
+ if (status_reg & SSI_DATAAVAILABLE(ch))
+ ssi_pio_complete(port, &omap_port->rxqueue[ch]);
+ }
+ if (status_reg & SSI_BREAKDETECTED)
+ ssi_break_complete(port);
+ if (status_reg & SSI_ERROROCCURED)
+ ssi_error(port);
+
+ status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ pm_runtime_put_sync(omap_port->pdev);
+
+ if (status_reg)
+ tasklet_hi_schedule(&omap_port->pio_tasklet);
+ else
+ enable_irq(omap_port->irq);
+}
+
+static irqreturn_t ssi_pio_isr(int irq, void *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ tasklet_hi_schedule(&omap_port->pio_tasklet);
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void ssi_wake_tasklet(unsigned long ssi_port)
+{
+ struct hsi_port *port = (struct hsi_port *)ssi_port;
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ if (ssi_wakein(port)) {
+ /*
+  * We can have a quick high-low-high transition on the line.
+  * In such a case, with long interrupt latencies, we can miss
+  * the low event or get the high event twice. This workaround
+  * avoids breaking the clock reference count when that occurs.
+  */
+ spin_lock(&omap_port->lock);
+ if (!omap_port->wkin_cken) {
+ omap_port->wkin_cken = 1;
+ pm_runtime_get_sync(omap_port->pdev);
+ }
+ spin_unlock(&omap_port->lock);
+ dev_dbg(&ssi->device, "Wake in high\n");
+ if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
+ writel(SSI_WAKE(0),
+ omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+ }
+ hsi_event(port, HSI_EVENT_START_RX);
+ } else {
+ dev_dbg(&ssi->device, "Wake in low\n");
+ if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
+ writel(SSI_WAKE(0),
+ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
+ }
+ hsi_event(port, HSI_EVENT_STOP_RX);
+ spin_lock(&omap_port->lock);
+ if (omap_port->wkin_cken) {
+ pm_runtime_put_sync(omap_port->pdev);
+ omap_port->wkin_cken = 0;
+ }
+ spin_unlock(&omap_port->lock);
+ }
+}
+
+static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
+
+ tasklet_hi_schedule(&omap_port->wake_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ssi_port_irq(struct hsi_port *port,
+ struct platform_device *pd)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ int err;
+
+ omap_port->irq = platform_get_irq(pd, 0);
+ if (omap_port->irq < 0) {
+ dev_err(&port->device, "Port IRQ resource missing\n");
+ return omap_port->irq;
+ }
+ tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
+ (unsigned long)port);
+ err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
+ 0, "mpu_irq0", port);
+ if (err < 0)
+ dev_err(&port->device, "Request IRQ %d failed (%d)\n",
+ omap_port->irq, err);
+ return err;
+}
+
+static int __init ssi_wake_irq(struct hsi_port *port,
+ struct platform_device *pd)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ int cawake_irq;
+ int err;
+
+ if (omap_port->wake_gpio == -1) {
+ omap_port->wake_irq = -1;
+ return 0;
+ }
+
+ cawake_irq = gpio_to_irq(omap_port->wake_gpio);
+
+ omap_port->wake_irq = cawake_irq;
+ tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
+ (unsigned long)port);
+ err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "cawake", port);
+ if (err < 0) {
+  dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
+   cawake_irq, err);
+  return err;
+ }
+ err = enable_irq_wake(cawake_irq);
+ if (err < 0)
+ dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
+ cawake_irq, err);
+
+ return err;
+}
+
+static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
+{
+ unsigned int ch;
+
+ for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
+ INIT_LIST_HEAD(&omap_port->txqueue[ch]);
+ INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
+ }
+ INIT_LIST_HEAD(&omap_port->brkqueue);
+}
+
+static int __init ssi_port_get_iomem(struct platform_device *pd,
+ const char *name, void __iomem **pbase, dma_addr_t *phy)
+{
+ struct hsi_port *port = platform_get_drvdata(pd);
+ struct resource *mem;
+ struct resource *ioarea;
+ void __iomem *base;
+
+ mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
+ if (!mem) {
+ dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
+ return -ENXIO;
+ }
+ ioarea = devm_request_mem_region(&port->device, mem->start,
+ resource_size(mem), dev_name(&pd->dev));
+ if (!ioarea) {
+ dev_err(&pd->dev, "%s IO memory region request failed\n",
+ mem->name);
+ return -ENXIO;
+ }
+ base = devm_ioremap(&port->device, mem->start, resource_size(mem));
+ if (!base) {
+ dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
+ return -ENXIO;
+ }
+ *pbase = base;
+
+ if (phy)
+ *phy = mem->start;
+
+ return 0;
+}
+
+static int __init ssi_port_probe(struct platform_device *pd)
+{
+ struct device_node *np = pd->dev.of_node;
+ struct hsi_port *port;
+ struct omap_ssi_port *omap_port;
+ struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int cawake_gpio;
+ u32 port_id;
+ int err;
+
+ dev_dbg(&pd->dev, "init ssi port...\n");
+
+ err = ref_module(THIS_MODULE, ssi->owner);
+ if (err) {
+ dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n",
+ err);
+ return -ENODEV;
+ }
+
+ if (!ssi->port || !omap_ssi->port) {
+ dev_err(&pd->dev, "ssi controller not initialized!\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ /* get id of first uninitialized port in controller */
+ for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
+ port_id++)
+ ;
+
+ if (port_id >= ssi->num_ports) {
+ dev_err(&pd->dev, "port id out of range!\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ port = ssi->port[port_id];
+
+ if (!np) {
+ dev_err(&pd->dev, "missing device tree data\n");
+ err = -EINVAL;
+ goto error;
+ }
+
+ cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
+ if (cawake_gpio < 0) {
+ dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
+ cawake_gpio);
+ err = -ENODEV;
+ goto error;
+ }
+
+ err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
+ "cawake");
+ if (err) {
+ dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
+ err);
+ err = -ENXIO;
+ goto error;
+ }
+
+ omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
+ if (!omap_port) {
+ err = -ENOMEM;
+ goto error;
+ }
+ omap_port->wake_gpio = cawake_gpio;
+ omap_port->pdev = &pd->dev;
+ omap_port->port_id = port_id;
+
+ /* initialize HSI port */
+ port->async = ssi_async;
+ port->setup = ssi_setup;
+ port->flush = ssi_flush;
+ port->start_tx = ssi_start_tx;
+ port->stop_tx = ssi_stop_tx;
+ port->release = ssi_release;
+ hsi_port_set_drvdata(port, omap_port);
+ omap_ssi->port[port_id] = omap_port;
+
+ platform_set_drvdata(pd, port);
+
+ err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
+ &omap_port->sst_dma);
+ if (err < 0)
+ goto error;
+ err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
+ &omap_port->ssr_dma);
+ if (err < 0)
+ goto error;
+
+ err = ssi_port_irq(port, pd);
+ if (err < 0)
+ goto error;
+ err = ssi_wake_irq(port, pd);
+ if (err < 0)
+ goto error;
+
+ ssi_queues_init(omap_port);
+ spin_lock_init(&omap_port->lock);
+ spin_lock_init(&omap_port->wk_lock);
+ omap_port->dev = &port->device;
+
+ pm_runtime_irq_safe(omap_port->pdev);
+ pm_runtime_enable(omap_port->pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ err = ssi_debug_add_port(omap_port, omap_ssi->dir);
+ if (err < 0) {
+ pm_runtime_disable(omap_port->pdev);
+ goto error;
+ }
+#endif
+
+ hsi_add_clients_from_dt(port, np);
+
+ dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
+ port_id, cawake_gpio);
+
+ return 0;
+
+error:
+ return err;
+}
+
+static int __exit ssi_port_remove(struct platform_device *pd)
+{
+ struct hsi_port *port = platform_get_drvdata(pd);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+#ifdef CONFIG_DEBUG_FS
+ ssi_debug_remove_port(port);
+#endif
+
+ hsi_port_unregister_clients(port);
+
+ tasklet_kill(&omap_port->wake_tasklet);
+ tasklet_kill(&omap_port->pio_tasklet);
+
+ port->async = hsi_dummy_msg;
+ port->setup = hsi_dummy_cl;
+ port->flush = hsi_dummy_cl;
+ port->start_tx = hsi_dummy_cl;
+ port->stop_tx = hsi_dummy_cl;
+ port->release = hsi_dummy_cl;
+
+ omap_ssi->port[omap_port->port_id] = NULL;
+ platform_set_drvdata(pd, NULL);
+ pm_runtime_disable(&pd->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ omap_port->sys_mpu_enable = readl(omap_ssi->sys +
+ SSI_MPU_ENABLE_REG(port->num, 0));
+
+ return 0;
+}
+
+static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *base;
+
+ writel_relaxed(omap_port->sys_mpu_enable,
+ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+
+ /* SST context */
+ base = omap_port->sst_base;
+ writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
+ writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
+ writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);
+
+ /* SSR context */
+ base = omap_port->ssr_base;
+ writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
+ writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
+ writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);
+
+ return 0;
+}
+
+static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
+{
+ u32 mode;
+
+ writel_relaxed(omap_port->sst.mode,
+ omap_port->sst_base + SSI_SST_MODE_REG);
+ writel_relaxed(omap_port->ssr.mode,
+ omap_port->ssr_base + SSI_SSR_MODE_REG);
+ /* OCP barrier */
+ mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
+
+ return 0;
+}
+
+static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
+{
+ writel_relaxed(omap_port->sst.divisor,
+ omap_port->sst_base + SSI_SST_DIVISOR_REG);
+
+ return 0;
+}
+
+static int omap_ssi_port_runtime_suspend(struct device *dev)
+{
+ struct hsi_port *port = dev_get_drvdata(dev);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(dev, "port runtime suspend!\n");
+
+ ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
+ if (omap_ssi->get_loss)
+ omap_port->loss_count =
+ omap_ssi->get_loss(ssi->device.parent);
+ ssi_save_port_ctx(omap_port);
+
+ return 0;
+}
+
+static int omap_ssi_port_runtime_resume(struct device *dev)
+{
+ struct hsi_port *port = dev_get_drvdata(dev);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(dev, "port runtime resume!\n");
+
+ if ((omap_ssi->get_loss) && (omap_port->loss_count ==
+ omap_ssi->get_loss(ssi->device.parent)))
+ goto mode; /* We always need to restore the mode & TX divisor */
+
+ ssi_restore_port_ctx(omap_port);
+
+mode:
+ ssi_restore_divisor(omap_port);
+ ssi_restore_port_mode(omap_port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_ssi_port_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
+ omap_ssi_port_runtime_resume, NULL)
+};
+
+#define DEV_PM_OPS (&omap_ssi_port_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ssi_port_of_match[] = {
+ { .compatible = "ti,omap3-ssi-port", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
+#else
+#define omap_ssi_port_of_match NULL
+#endif
+
+static struct platform_driver ssi_port_pdriver = {
+ .remove = __exit_p(ssi_port_remove),
+ .driver = {
+ .name = "omap_ssi_port",
+ .owner = THIS_MODULE,
+ .of_match_table = omap_ssi_port_of_match,
+ .pm = DEV_PM_OPS,
+ },
+};
+
+module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);
+
+MODULE_ALIAS("platform:omap_ssi_port");
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/controllers/omap_ssi_regs.h b/drivers/hsi/controllers/omap_ssi_regs.h
new file mode 100644
index 000000000000..08f98dd1d01f
--- /dev/null
+++ b/drivers/hsi/controllers/omap_ssi_regs.h
@@ -0,0 +1,171 @@
+/* Hardware definitions for SSI.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __OMAP_SSI_REGS_H__
+#define __OMAP_SSI_REGS_H__
+
+/*
+ * SSI SYS registers
+ */
+#define SSI_REVISION_REG 0
+# define SSI_REV_MAJOR 0xf0
+# define SSI_REV_MINOR 0xf
+#define SSI_SYSCONFIG_REG 0x10
+# define SSI_AUTOIDLE (1 << 0)
+# define SSI_SOFTRESET (1 << 1)
+# define SSI_SIDLEMODE_FORCE 0
+# define SSI_SIDLEMODE_NO (1 << 3)
+# define SSI_SIDLEMODE_SMART (1 << 4)
+# define SSI_SIDLEMODE_MASK 0x18
+# define SSI_MIDLEMODE_FORCE 0
+# define SSI_MIDLEMODE_NO (1 << 12)
+# define SSI_MIDLEMODE_SMART (1 << 13)
+# define SSI_MIDLEMODE_MASK 0x3000
+#define SSI_SYSSTATUS_REG 0x14
+# define SSI_RESETDONE 1
+#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2))
+#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8))
+# define SSI_DATAACCEPT(channel) (1 << (channel))
+# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8))
+# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16))
+# define SSI_ERROROCCURED (1 << 24)
+# define SSI_BREAKDETECTED (1 << 25)
+#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800
+#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804
+# define SSI_GDD_LCH(channel) (1 << (channel))
+#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10))
+#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10))
+#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10))
+# define SSI_WAKE(channel) (1 << (channel))
+# define SSI_WAKE_MASK 0xff
+
+/*
+ * SSI SST registers
+ */
+#define SSI_SST_ID_REG 0
+#define SSI_SST_MODE_REG 4
+# define SSI_MODE_VAL_MASK 3
+# define SSI_MODE_SLEEP 0
+# define SSI_MODE_STREAM 1
+# define SSI_MODE_FRAME 2
+# define SSI_MODE_MULTIPOINTS 3
+#define SSI_SST_FRAMESIZE_REG 8
+# define SSI_FRAMESIZE_DEFAULT 31
+#define SSI_SST_TXSTATE_REG 0xc
+# define SSI_TXSTATE_IDLE 0
+#define SSI_SST_BUFSTATE_REG 0x10
+# define SSI_FULL(channel) (1 << (channel))
+#define SSI_SST_DIVISOR_REG 0x18
+# define SSI_MAX_DIVISOR 127
+#define SSI_SST_BREAK_REG 0x20
+#define SSI_SST_CHANNELS_REG 0x24
+# define SSI_CHANNELS_DEFAULT 4
+#define SSI_SST_ARBMODE_REG 0x28
+# define SSI_ARBMODE_ROUNDROBIN 0
+# define SSI_ARBMODE_PRIORITY 1
+#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
+#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
+
+/*
+ * SSI SSR registers
+ */
+#define SSI_SSR_ID_REG 0
+#define SSI_SSR_MODE_REG 4
+#define SSI_SSR_FRAMESIZE_REG 8
+#define SSI_SSR_RXSTATE_REG 0xc
+#define SSI_SSR_BUFSTATE_REG 0x10
+# define SSI_NOTEMPTY(channel) (1 << (channel))
+#define SSI_SSR_BREAK_REG 0x1c
+#define SSI_SSR_ERROR_REG 0x20
+#define SSI_SSR_ERRORACK_REG 0x24
+#define SSI_SSR_OVERRUN_REG 0x2c
+#define SSI_SSR_OVERRUNACK_REG 0x30
+#define SSI_SSR_TIMEOUT_REG 0x34
+# define SSI_TIMEOUT_DEFAULT 0
+#define SSI_SSR_CHANNELS_REG 0x28
+#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
+#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
+
+/*
+ * SSI GDD registers
+ */
+#define SSI_GDD_HW_ID_REG 0
+#define SSI_GDD_PPORT_ID_REG 0x10
+#define SSI_GDD_MPORT_ID_REG 0x14
+#define SSI_GDD_PPORT_SR_REG 0x20
+#define SSI_GDD_MPORT_SR_REG 0x24
+# define SSI_ACTIVE_LCH_NUM_MASK 0xff
+#define SSI_GDD_TEST_REG 0x40
+# define SSI_TEST 1
+#define SSI_GDD_GCR_REG 0x100
+# define SSI_CLK_AUTOGATING_ON (1 << 3)
+# define SSI_FREE (1 << 2)
+# define SSI_SWITCH_OFF (1 << 0)
+#define SSI_GDD_GRST_REG 0x200
+# define SSI_SWRESET 1
+#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40))
+# define SSI_DST_BURST_EN_MASK 0xc000
+# define SSI_DST_SINGLE_ACCESS0 0
+# define SSI_DST_SINGLE_ACCESS (1 << 14)
+# define SSI_DST_BURST_4x32_BIT (2 << 14)
+# define SSI_DST_BURST_8x32_BIT (3 << 14)
+# define SSI_DST_MASK 0x1e00
+# define SSI_DST_MEMORY_PORT (8 << 9)
+# define SSI_DST_PERIPHERAL_PORT (9 << 9)
+# define SSI_SRC_BURST_EN_MASK 0x180
+# define SSI_SRC_SINGLE_ACCESS0 0
+# define SSI_SRC_SINGLE_ACCESS (1 << 7)
+# define SSI_SRC_BURST_4x32_BIT (2 << 7)
+# define SSI_SRC_BURST_8x32_BIT (3 << 7)
+# define SSI_SRC_MASK 0x3c
+# define SSI_SRC_MEMORY_PORT (8 << 2)
+# define SSI_SRC_PERIPHERAL_PORT (9 << 2)
+# define SSI_DATA_TYPE_MASK 3
+# define SSI_DATA_TYPE_S32 2
+#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40))
+# define SSI_DST_AMODE_MASK (3 << 14)
+# define SSI_DST_AMODE_CONST 0
+# define SSI_DST_AMODE_POSTINC (1 << 12)
+# define SSI_SRC_AMODE_MASK (3 << 12)
+# define SSI_SRC_AMODE_CONST 0
+# define SSI_SRC_AMODE_POSTINC (1 << 12)
+# define SSI_CCR_ENABLE (1 << 7)
+# define SSI_CCR_SYNC_MASK 0x1f
+#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40))
+# define SSI_BLOCK_IE (1 << 5)
+# define SSI_HALF_IE (1 << 2)
+# define SSI_TOUT_IE (1 << 0)
+#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40))
+# define SSI_CSR_SYNC (1 << 6)
+# define SSI_CSR_BLOCK (1 << 5)
+# define SSI_CSR_HALF (1 << 2)
+# define SSI_CSR_TOUR (1 << 0)
+#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40))
+#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40))
+#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40))
+#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40))
+#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40))
+#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40))
+# define SSI_ENABLE_LNK (1 << 15)
+# define SSI_STOP_LNK (1 << 14)
+# define SSI_NEXT_CH_ID_MASK 0xf
+
+#endif /* __OMAP_SSI_REGS_H__ */
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 749f7b5c8179..fe9371271ce2 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "hsi_core.h"
static ssize_t modalias_show(struct device *dev,
@@ -50,7 +52,13 @@ static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int hsi_bus_match(struct device *dev, struct device_driver *driver)
{
- return strcmp(dev_name(dev), driver->name) == 0;
+ if (of_driver_match_device(dev, driver))
+ return true;
+
+ if (strcmp(dev_name(dev), driver->name) == 0)
+ return true;
+
+ return false;
}
static struct bus_type hsi_bus_type = {
@@ -62,18 +70,37 @@ static struct bus_type hsi_bus_type = {
static void hsi_client_release(struct device *dev)
{
- kfree(to_hsi_client(dev));
+ struct hsi_client *cl = to_hsi_client(dev);
+
+ kfree(cl->tx_cfg.channels);
+ kfree(cl->rx_cfg.channels);
+ kfree(cl);
}
-static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
+struct hsi_client *hsi_new_client(struct hsi_port *port,
+ struct hsi_board_info *info)
{
struct hsi_client *cl;
+ size_t size;
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
- return;
+ return NULL;
+
cl->tx_cfg = info->tx_cfg;
+ if (cl->tx_cfg.channels) {
+ size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);
+ cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL);
+ memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size);
+ }
+
cl->rx_cfg = info->rx_cfg;
+ if (cl->rx_cfg.channels) {
+ size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels);
+ cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL);
+ memcpy(cl->rx_cfg.channels, info->rx_cfg.channels, size);
+ }
+
cl->device.bus = &hsi_bus_type;
cl->device.parent = &port->device;
cl->device.release = hsi_client_release;
@@ -85,7 +112,10 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
pr_err("hsi: failed to register client: %s\n", info->name);
put_device(&cl->device);
}
+
+ return cl;
}
+EXPORT_SYMBOL_GPL(hsi_new_client);
static void hsi_scan_board_info(struct hsi_controller *hsi)
{
@@ -101,12 +131,209 @@ static void hsi_scan_board_info(struct hsi_controller *hsi)
}
}
-static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
+#ifdef CONFIG_OF
+static struct hsi_board_info hsi_char_dev_info = {
+ .name = "hsi_char",
+};
+
+static int hsi_of_property_parse_mode(struct device_node *client, char *name,
+ unsigned int *result)
+{
+ const char *mode;
+ int err;
+
+ err = of_property_read_string(client, name, &mode);
+ if (err < 0)
+ return err;
+
+ if (strcmp(mode, "stream") == 0)
+ *result = HSI_MODE_STREAM;
+ else if (strcmp(mode, "frame") == 0)
+ *result = HSI_MODE_FRAME;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hsi_of_property_parse_flow(struct device_node *client, char *name,
+ unsigned int *result)
+{
+ const char *flow;
+ int err;
+
+ err = of_property_read_string(client, name, &flow);
+ if (err < 0)
+ return err;
+
+ if (strcmp(flow, "synchronized") == 0)
+ *result = HSI_FLOW_SYNC;
+ else if (strcmp(flow, "pipeline") == 0)
+ *result = HSI_FLOW_PIPE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hsi_of_property_parse_arb_mode(struct device_node *client,
+ char *name, unsigned int *result)
+{
+ const char *arb_mode;
+ int err;
+
+ err = of_property_read_string(client, name, &arb_mode);
+ if (err < 0)
+ return err;
+
+ if (strcmp(arb_mode, "round-robin") == 0)
+ *result = HSI_ARB_RR;
+ else if (strcmp(arb_mode, "priority") == 0)
+ *result = HSI_ARB_PRIO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hsi_add_client_from_dt(struct hsi_port *port,
+ struct device_node *client)
+{
+ struct hsi_client *cl;
+ struct hsi_channel channel;
+ struct property *prop;
+ char name[32];
+ int length, cells, err, i, max_chan, mode;
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return;
+
+ err = of_modalias_node(client, name, sizeof(name));
+ if (err)
+ goto err;
+
+ dev_set_name(&cl->device, "%s", name);
+
+ err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
+ if (err) {
+ err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
+ &cl->rx_cfg.mode);
+ if (err)
+ goto err;
+
+ err = hsi_of_property_parse_mode(client, "hsi-tx-mode",
+ &cl->tx_cfg.mode);
+ if (err)
+ goto err;
+ } else {
+ cl->rx_cfg.mode = mode;
+ cl->tx_cfg.mode = mode;
+ }
+
+ err = of_property_read_u32(client, "hsi-speed-kbps",
+ &cl->tx_cfg.speed);
+ if (err)
+ goto err;
+ cl->rx_cfg.speed = cl->tx_cfg.speed;
+
+ err = hsi_of_property_parse_flow(client, "hsi-flow",
+ &cl->rx_cfg.flow);
+ if (err)
+ goto err;
+
+ err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode",
+ &cl->rx_cfg.arb_mode);
+ if (err)
+ goto err;
+
+ prop = of_find_property(client, "hsi-channel-ids", &length);
+ if (!prop) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ cells = length / sizeof(u32);
+
+ cl->rx_cfg.num_channels = cells;
+ cl->tx_cfg.num_channels = cells;
+
+ cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+ if (!cl->rx_cfg.channels) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+ if (!cl->tx_cfg.channels) {
+ err = -ENOMEM;
+ goto err2;
+ }
+
+ max_chan = 0;
+ for (i = 0; i < cells; i++) {
+ err = of_property_read_u32_index(client, "hsi-channel-ids", i,
+ &channel.id);
+ if (err)
+ goto err3;
+
+ err = of_property_read_string_index(client, "hsi-channel-names",
+ i, &channel.name);
+ if (err)
+ channel.name = NULL;
+
+ if (channel.id > max_chan)
+ max_chan = channel.id;
+
+ cl->rx_cfg.channels[i] = channel;
+ cl->tx_cfg.channels[i] = channel;
+ }
+
+ cl->rx_cfg.num_hw_channels = max_chan + 1;
+ cl->tx_cfg.num_hw_channels = max_chan + 1;
+
+ cl->device.bus = &hsi_bus_type;
+ cl->device.parent = &port->device;
+ cl->device.release = hsi_client_release;
+ cl->device.of_node = client;
+
+ if (device_register(&cl->device) < 0) {
+ pr_err("hsi: failed to register client: %s\n", name);
+ put_device(&cl->device);
+ goto err3;
+ }
+
+ return;
+
+err3:
+ kfree(cl->tx_cfg.channels);
+err2:
+ kfree(cl->rx_cfg.channels);
+err:
+ kfree(cl);
+ pr_err("hsi client: missing or incorrect of property: err=%d\n", err);
+}
+
+void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients)
+{
+ struct device_node *child;
+
+ /* register hsi-char device */
+ hsi_new_client(port, &hsi_char_dev_info);
+
+ for_each_available_child_of_node(clients, child)
+ hsi_add_client_from_dt(port, child);
+}
+EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt);
+#endif
+
+int hsi_remove_client(struct device *dev, void *data __maybe_unused)
{
device_unregister(dev);
return 0;
}
+EXPORT_SYMBOL_GPL(hsi_remove_client);
static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
{
@@ -130,6 +357,16 @@ static void hsi_port_release(struct device *dev)
}
/**
+ * hsi_unregister_port - Unregister an HSI port
+ * @port: The HSI port to unregister
+ */
+void hsi_port_unregister_clients(struct hsi_port *port)
+{
+ device_for_each_child(&port->device, NULL, hsi_remove_client);
+}
+EXPORT_SYMBOL_GPL(hsi_port_unregister_clients);
+
+/**
* hsi_unregister_controller - Unregister an HSI controller
* @hsi: The HSI controller to register
*/
@@ -472,7 +709,7 @@ int hsi_unregister_port_event(struct hsi_client *cl)
EXPORT_SYMBOL_GPL(hsi_unregister_port_event);
/**
- * hsi_event -Notifies clients about port events
+ * hsi_event - Notifies clients about port events
* @port: Port where the event occurred
* @event: The event type
*
@@ -492,6 +729,32 @@ int hsi_event(struct hsi_port *port, unsigned long event)
}
EXPORT_SYMBOL_GPL(hsi_event);
+/**
+ * hsi_get_channel_id_by_name - acquire channel id by channel name
+ * @cl: HSI client, which uses the channel
+ * @name: name the channel is known under
+ *
+ * Clients can call this function to get the hsi channel ids similar to
+ * requesting IRQs or GPIOs by name. This function assumes the same
+ * channel configuration is used for RX and TX.
+ *
+ * Returns -errno on error or channel id on success.
+ */
+int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name)
+{
+ int i;
+
+ if (!cl->rx_cfg.channels)
+ return -ENOENT;
+
+ for (i = 0; i < cl->rx_cfg.num_channels; i++)
+ if (!strcmp(cl->rx_cfg.channels[i].name, name))
+ return cl->rx_cfg.channels[i].id;
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name);
+
static int __init hsi_init(void)
{
return bus_register(&hsi_bus_type);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 602ca86a6488..284cf66489f4 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -471,18 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+static void reset_channel_cb(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+
+ channel->onchannel_callback = NULL;
+}
+
static void vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
int ret;
- unsigned long flags;
channel->state = CHANNEL_OPEN_STATE;
channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
- spin_lock_irqsave(&channel->inbound_lock, flags);
- channel->onchannel_callback = NULL;
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu, reset_channel_cb,
+ channel, true);
+ else
+ reset_channel_cb(channel);
/* Send a closing message */
@@ -674,8 +682,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
-
- if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
+ if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
return -EINVAL;
/*
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index fa920469bf10..6c8b032cacba 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void)
spin_lock_init(&channel->sc_lock);
INIT_LIST_HEAD(&channel->sc_list);
+ INIT_LIST_HEAD(&channel->percpu_list);
channel->controlwq = create_workqueue("hv_vmbus_ctl");
if (!channel->controlwq) {
@@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel)
queue_work(vmbus_connection.work_queue, &channel->work);
}
+static void percpu_channel_enq(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+ int cpu = smp_processor_id();
+
+ list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+}
+static void percpu_channel_deq(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+
+ list_del(&channel->percpu_list);
+}
/*
* vmbus_process_rescind_offer -
@@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ percpu_channel_deq, channel, true);
+ else
+ percpu_channel_deq(channel);
+
if (channel->primary_channel == NULL) {
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&channel->listentry);
@@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work)
work);
struct vmbus_channel *channel;
bool fnew = true;
+ bool enq = false;
int ret;
unsigned long flags;
@@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
- if (fnew)
+ if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
+ enq = true;
+ }
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ if (enq) {
+ if (newchannel->target_cpu != smp_processor_id())
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_enq,
+ newchannel, true);
+ else
+ percpu_channel_enq(newchannel);
+ }
if (!fnew) {
/*
* Check to see if this is a sub-channel.
@@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work)
spin_lock_irqsave(&channel->sc_lock, flags);
list_add_tail(&newchannel->sc_list, &channel->sc_list);
spin_unlock_irqrestore(&channel->sc_lock, flags);
+
+ if (newchannel->target_cpu != smp_processor_id())
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_enq,
+ newchannel, true);
+ else
+ percpu_channel_enq(newchannel);
+
newchannel->state = CHANNEL_OPEN_STATE;
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
@@ -365,7 +404,7 @@ static u32 next_vp;
* performance critical channels (IDE, SCSI and Network) will be uniformly
* distributed across all available CPUs.
*/
-static u32 get_vp_index(uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid)
{
u32 cur_cpu;
int i;
@@ -387,10 +426,13 @@ static u32 get_vp_index(uuid_le *type_guid)
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
*/
- return 0;
+ channel->target_cpu = 0;
+ channel->target_vp = 0;
+ return;
}
cur_cpu = (++next_vp % max_cpus);
- return hv_context.vp_index[cur_cpu];
+ channel->target_cpu = cur_cpu;
+ channel->target_vp = hv_context.vp_index[cur_cpu];
}
/*
@@ -438,7 +480,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer->connection_id;
}
- newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+ init_vp_index(newchannel, &offer->offer.if_type);
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 2e7801af466e..e84f4526eb36 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -224,8 +224,8 @@ cleanup:
vmbus_connection.int_page = NULL;
}
- free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1);
- free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
+ free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
@@ -234,6 +234,28 @@ cleanup:
return ret;
}
+/*
+ * Map the given relid to the corresponding channel based on the
+ * per-cpu list of channels that have been affinitized to this CPU.
+ * This will be used in the channel callback path as we can do this
+ * mapping in a lock-free fashion.
+ */
+static struct vmbus_channel *pcpu_relid2channel(u32 relid)
+{
+ struct vmbus_channel *channel;
+ struct vmbus_channel *found_channel = NULL;
+ int cpu = smp_processor_id();
+ struct list_head *pcpu_head = &hv_context.percpu_list[cpu];
+
+ list_for_each_entry(channel, pcpu_head, percpu_list) {
+ if (channel->offermsg.child_relid == relid) {
+ found_channel = channel;
+ break;
+ }
+ }
+
+ return found_channel;
+}
/*
* relid2channel - Get the channel object given its
@@ -277,7 +299,6 @@ struct vmbus_channel *relid2channel(u32 relid)
static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
- unsigned long flags;
void *arg;
bool read_state;
u32 bytes_to_read;
@@ -286,7 +307,7 @@ static void process_chn_event(u32 relid)
* Find the channel based on this relid and invokes the
* channel callback to process the event
*/
- channel = relid2channel(relid);
+ channel = pcpu_relid2channel(relid);
if (!channel) {
pr_err("channel not found for relid - %u\n", relid);
@@ -296,13 +317,12 @@ static void process_chn_event(u32 relid)
/*
* A channel once created is persistent even when there
* is no driver handling the device. An unloading driver
- * sets the onchannel_callback to NULL under the
- * protection of the channel inbound_lock. Thus, checking
- * and invoking the driver specific callback takes care of
- * orderly unloading of the driver.
+ * sets the onchannel_callback to NULL on the same CPU
+ * as where this interrupt is handled (in an interrupt context).
+ * Thus, checking and invoking the driver specific callback takes
+ * care of orderly unloading of the driver.
*/
- spin_lock_irqsave(&channel->inbound_lock, flags);
if (channel->onchannel_callback != NULL) {
arg = channel->channel_callback_context;
read_state = channel->batched_reading;
@@ -327,7 +347,6 @@ static void process_chn_event(u32 relid)
pr_err("no channel callback for relid - %u\n", relid);
}
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index bcb49502c3bf..edfc8488cb03 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -383,6 +383,8 @@ void hv_synic_init(void *arg)
*/
rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
hv_context.vp_index[cpu] = (u32)vp_index;
+
+ INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
return;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 7e6d78dc9437..5e90c5d771a7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -459,6 +460,11 @@ static bool do_hot_add;
*/
static uint pressure_report_delay = 45;
+/*
+ * The last time we posted a pressure report to host.
+ */
+static unsigned long last_post_time;
+
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
@@ -542,6 +548,7 @@ struct hv_dynmem_device {
static struct hv_dynmem_device dm_device;
+static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG
static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
@@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
* have not been "onlined" within the allowed time.
*/
wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
-
+ post_status(&dm_device);
}
return;
@@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
struct sysinfo val;
+ unsigned long now = jiffies;
+ unsigned long last_post = last_post_time;
if (pressure_report_delay > 0) {
--pressure_report_delay;
return;
}
+
+ if (!time_after(now, (last_post_time + HZ)))
+ return;
+
si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
@@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
if (status.hdr.trans_id != atomic_read(&trans_id))
return;
+ /*
+ * If the last post time that we sampled has changed,
+ * we have raced, don't post the status.
+ */
+ if (last_post != last_post_time)
+ return;
+
+ last_post_time = jiffies;
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
@@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
if (ret == -EAGAIN)
msleep(20);
-
+ post_status(&dm_device);
} while (ret == -EAGAIN);
if (ret) {
@@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
struct dm_unballoon_response resp;
int i;
- for (i = 0; i < range_count; i++)
+ for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
+ post_status(&dm_device);
+ }
if (req->more_pages == 1)
return;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 860134da8039..18d1a8404cbc 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -510,6 +510,11 @@ struct hv_context {
* basis.
*/
struct tasklet_struct *event_dpc[NR_CPUS];
+ /*
+ * To optimize the mapping of relid to channel, maintain
+ * per-cpu list of the channels based on their CPU affinity.
+ */
+ struct list_head percpu_list[NR_CPUS];
};
extern struct hv_context hv_context;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support"
- depends on (!OF && !IIO) || (OF && IIO)
+ depends on !OF || IIO=n || IIO
help
This driver supports NTC thermistors sensor reading and its
interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
unsigned int ohm;
};
+/* Order matters, ntc_match references the entries by index */
static const struct platform_device_id ntc_thermistor_id[] = {
{ "ncp15wb473", TYPE_NCPXXWB473 },
{ "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
char name[PLATFORM_NAME_SIZE];
};
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
static const struct of_device_id ntc_match[] = {
{ .compatible = "ntc,ncp15wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[0] },
{ .compatible = "ntc,ncp18wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[1] },
{ .compatible = "ntc,ncp21wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[2] },
{ .compatible = "ntc,ncp03wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[3] },
{ .compatible = "ntc,ncp15wl333",
- .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+ .data = &ntc_thermistor_id[4] },
{ },
};
MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
return NULL;
}
+#define ntc_match NULL
+
static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
{ }
#endif
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 8242b75d96c8..611f34c7333d 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -26,7 +26,7 @@
struct vexpress_hwmon_data {
struct device *hwmon_dev;
- struct vexpress_config_func *func;
+ struct regmap *reg;
const char *name;
};
@@ -53,7 +53,7 @@ static ssize_t vexpress_hwmon_u32_show(struct device *dev,
int err;
u32 value;
- err = vexpress_config_read(data->func, 0, &value);
+ err = regmap_read(data->reg, 0, &value);
if (err)
return err;
@@ -68,11 +68,11 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev,
int err;
u32 value_hi, value_lo;
- err = vexpress_config_read(data->func, 0, &value_lo);
+ err = regmap_read(data->reg, 0, &value_lo);
if (err)
return err;
- err = vexpress_config_read(data->func, 1, &value_hi);
+ err = regmap_read(data->reg, 1, &value_hi);
if (err)
return err;
@@ -234,9 +234,9 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
type = match->data;
data->name = type->name;
- data->func = vexpress_config_func_get_by_dev(&pdev->dev);
- if (!data->func)
- return -ENODEV;
+ data->reg = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(data->reg))
+ return PTR_ERR(data->reg);
err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
if (err)
@@ -252,7 +252,6 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
error:
sysfs_remove_group(&pdev->dev.kobj, match->data);
- vexpress_config_func_put(data->func);
return err;
}
@@ -266,8 +265,6 @@ static int vexpress_hwmon_remove(struct platform_device *pdev)
match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
sysfs_remove_group(&pdev->dev.kobj, match->data);
- vexpress_config_func_put(data->func);
-
return 0;
}
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 16f69be820c7..ee880382e3bc 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
- pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
+ pr_debug("%s: %sing: block=%llu, sectors=%u\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, blk_rq_sectors(rq),
- (unsigned long)rq->buffer);
+ (unsigned long long)block, blk_rq_sectors(rq));
if (hwif->rw_disk)
hwif->rw_disk(drive, rq);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 89777ed9abd8..3b5bacd4d8da 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -31,7 +31,108 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include <mach/at91_adc.h>
+/* Registers */
+#define AT91_ADC_CR 0x00 /* Control Register */
+#define AT91_ADC_SWRST (1 << 0) /* Software Reset */
+#define AT91_ADC_START (1 << 1) /* Start Conversion */
+
+#define AT91_ADC_MR 0x04 /* Mode Register */
+#define AT91_ADC_TSAMOD (3 << 0) /* ADC mode */
+#define AT91_ADC_TSAMOD_ADC_ONLY_MODE (0 << 0) /* ADC Mode */
+#define AT91_ADC_TSAMOD_TS_ONLY_MODE (1 << 0) /* Touch Screen Only Mode */
+#define AT91_ADC_TRGEN (1 << 0) /* Trigger Enable */
+#define AT91_ADC_TRGSEL (7 << 1) /* Trigger Selection */
+#define AT91_ADC_TRGSEL_TC0 (0 << 1)
+#define AT91_ADC_TRGSEL_TC1 (1 << 1)
+#define AT91_ADC_TRGSEL_TC2 (2 << 1)
+#define AT91_ADC_TRGSEL_EXTERNAL (6 << 1)
+#define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */
+#define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */
+#define AT91_ADC_PENDET (1 << 6) /* Pen contact detection enable */
+#define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescalar Rate Selection */
+#define AT91_ADC_PRESCAL_9G45 (0xff << 8)
+#define AT91_ADC_PRESCAL_(x) ((x) << 8)
+#define AT91_ADC_STARTUP_9260 (0x1f << 16) /* Startup Up Time */
+#define AT91_ADC_STARTUP_9G45 (0x7f << 16)
+#define AT91_ADC_STARTUP_9X5 (0xf << 16)
+#define AT91_ADC_STARTUP_(x) ((x) << 16)
+#define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */
+#define AT91_ADC_SHTIM_(x) ((x) << 24)
+#define AT91_ADC_PENDBC (0x0f << 28) /* Pen Debounce time */
+#define AT91_ADC_PENDBC_(x) ((x) << 28)
+
+#define AT91_ADC_TSR 0x0C
+#define AT91_ADC_TSR_SHTIM (0xf << 24) /* Sample & Hold Time */
+#define AT91_ADC_TSR_SHTIM_(x) ((x) << 24)
+
+#define AT91_ADC_CHER 0x10 /* Channel Enable Register */
+#define AT91_ADC_CHDR 0x14 /* Channel Disable Register */
+#define AT91_ADC_CHSR 0x18 /* Channel Status Register */
+#define AT91_ADC_CH(n) (1 << (n)) /* Channel Number */
+
+#define AT91_ADC_SR 0x1C /* Status Register */
+#define AT91_ADC_EOC(n) (1 << (n)) /* End of Conversion on Channel N */
+#define AT91_ADC_OVRE(n) (1 << ((n) + 8))/* Overrun Error on Channel N */
+#define AT91_ADC_DRDY (1 << 16) /* Data Ready */
+#define AT91_ADC_GOVRE (1 << 17) /* General Overrun Error */
+#define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */
+#define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */
+
+#define AT91_ADC_SR_9X5 0x30 /* Status Register for 9x5 */
+#define AT91_ADC_SR_DRDY_9X5 (1 << 24) /* Data Ready */
+
+#define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */
+#define AT91_ADC_LDATA (0x3ff)
+
+#define AT91_ADC_IER 0x24 /* Interrupt Enable Register */
+#define AT91_ADC_IDR 0x28 /* Interrupt Disable Register */
+#define AT91_ADC_IMR 0x2C /* Interrupt Mask Register */
+#define AT91RL_ADC_IER_PEN (1 << 20)
+#define AT91RL_ADC_IER_NOPEN (1 << 21)
+#define AT91_ADC_IER_PEN (1 << 29)
+#define AT91_ADC_IER_NOPEN (1 << 30)
+#define AT91_ADC_IER_XRDY (1 << 20)
+#define AT91_ADC_IER_YRDY (1 << 21)
+#define AT91_ADC_IER_PRDY (1 << 22)
+#define AT91_ADC_ISR_PENS (1 << 31)
+
+#define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */
+#define AT91_ADC_DATA (0x3ff)
+
+#define AT91_ADC_CDR0_9X5 (0x50) /* Channel Data Register 0 for 9X5 */
+
+#define AT91_ADC_ACR 0x94 /* Analog Control Register */
+#define AT91_ADC_ACR_PENDETSENS (0x3 << 0) /* pull-up resistor */
+
+#define AT91_ADC_TSMR 0xB0
+#define AT91_ADC_TSMR_TSMODE (3 << 0) /* Touch Screen Mode */
+#define AT91_ADC_TSMR_TSMODE_NONE (0 << 0)
+#define AT91_ADC_TSMR_TSMODE_4WIRE_NO_PRESS (1 << 0)
+#define AT91_ADC_TSMR_TSMODE_4WIRE_PRESS (2 << 0)
+#define AT91_ADC_TSMR_TSMODE_5WIRE (3 << 0)
+#define AT91_ADC_TSMR_TSAV (3 << 4) /* Averages samples */
+#define AT91_ADC_TSMR_TSAV_(x) ((x) << 4)
+#define AT91_ADC_TSMR_SCTIM (0x0f << 16) /* Switch closure time */
+#define AT91_ADC_TSMR_PENDBC (0x0f << 28) /* Pen Debounce time */
+#define AT91_ADC_TSMR_PENDBC_(x) ((x) << 28)
+#define AT91_ADC_TSMR_NOTSDMA (1 << 22) /* No Touchscreen DMA */
+#define AT91_ADC_TSMR_PENDET_DIS (0 << 24) /* Pen contact detection disable */
+#define AT91_ADC_TSMR_PENDET_ENA (1 << 24) /* Pen contact detection enable */
+
+#define AT91_ADC_TSXPOSR 0xB4
+#define AT91_ADC_TSYPOSR 0xB8
+#define AT91_ADC_TSPRESSR 0xBC
+
+#define AT91_ADC_TRGR_9260 AT91_ADC_MR
+#define AT91_ADC_TRGR_9G45 0x08
+#define AT91_ADC_TRGR_9X5 0xC0
+
+/* Trigger Register bit field */
+#define AT91_ADC_TRGR_TRGPER (0xffff << 16)
+#define AT91_ADC_TRGR_TRGPER_(x) ((x) << 16)
+#define AT91_ADC_TRGR_TRGMOD (0x7 << 0)
+#define AT91_ADC_TRGR_NONE (0 << 0)
+#define AT91_ADC_TRGR_MOD_PERIOD_TRIG (5 << 0)
#define AT91_ADC_CHAN(st, ch) \
(st->registers->channel_base + (ch * 4))
@@ -46,6 +147,29 @@
#define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
#define TOUCH_PEN_DETECT_DEBOUNCE_US 200
+#define MAX_RLPOS_BITS 10
+#define TOUCH_SAMPLE_PERIOD_US_RL 10000 /* 10ms, the SoC can't keep up with 2ms */
+#define TOUCH_SHTIM 0xa
+
+/**
+ * struct at91_adc_reg_desc - Various informations relative to registers
+ * @channel_base: Base offset for the channel data registers
+ * @drdy_mask: Mask of the DRDY field in the relevant registers
+ (Interruptions registers mostly)
+ * @status_register: Offset of the Interrupt Status Register
+ * @trigger_register: Offset of the Trigger setup register
+ * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register
+ * @mr_startup_mask: Mask of the STARTUP field in the adc MR register
+ */
+struct at91_adc_reg_desc {
+ u8 channel_base;
+ u32 drdy_mask;
+ u8 status_register;
+ u8 trigger_register;
+ u32 mr_prescal_mask;
+ u32 mr_startup_mask;
+};
+
struct at91_adc_caps {
bool has_ts; /* Support touch screen */
bool has_tsmr; /* only at91sam9x5, sama5d3 have TSMR reg */
@@ -64,12 +188,6 @@ struct at91_adc_caps {
struct at91_adc_reg_desc registers;
};
-enum atmel_adc_ts_type {
- ATMEL_ADC_TOUCHSCREEN_NONE = 0,
- ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
- ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
-};
-
struct at91_adc_state {
struct clk *adc_clk;
u16 *buffer;
@@ -114,6 +232,11 @@ struct at91_adc_state {
u16 ts_sample_period_val;
u32 ts_pressure_threshold;
+ u16 ts_pendbc;
+
+ bool ts_bufferedmeasure;
+ u32 ts_prev_absx;
+ u32 ts_prev_absy;
};
static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
@@ -220,7 +343,72 @@ static int at91_ts_sample(struct at91_adc_state *st)
return 0;
}
-static irqreturn_t at91_adc_interrupt(int irq, void *private)
+static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct at91_adc_state *st = iio_priv(idev);
+ u32 status = at91_adc_readl(st, st->registers->status_register);
+ unsigned int reg;
+
+ status &= at91_adc_readl(st, AT91_ADC_IMR);
+ if (status & st->registers->drdy_mask)
+ handle_adc_eoc_trigger(irq, idev);
+
+ if (status & AT91RL_ADC_IER_PEN) {
+ /* Disabling pen debounce is required to get a NOPEN irq */
+ reg = at91_adc_readl(st, AT91_ADC_MR);
+ reg &= ~AT91_ADC_PENDBC;
+ at91_adc_writel(st, AT91_ADC_MR, reg);
+
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN);
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_NOPEN
+ | AT91_ADC_EOC(3));
+ /* Set up period trigger for sampling */
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_MOD_PERIOD_TRIG |
+ AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val));
+ } else if (status & AT91RL_ADC_IER_NOPEN) {
+ reg = at91_adc_readl(st, AT91_ADC_MR);
+ reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC;
+ at91_adc_writel(st, AT91_ADC_MR, reg);
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_NONE);
+
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_NOPEN
+ | AT91_ADC_EOC(3));
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN);
+ st->ts_bufferedmeasure = false;
+ input_report_key(st->ts_input, BTN_TOUCH, 0);
+ input_sync(st->ts_input);
+ } else if (status & AT91_ADC_EOC(3)) {
+ /* Conversion finished */
+ if (st->ts_bufferedmeasure) {
+ /*
+ * Last measurement is always discarded, since it can
+ * be erroneous.
+ * Always report previous measurement
+ */
+ input_report_abs(st->ts_input, ABS_X, st->ts_prev_absx);
+ input_report_abs(st->ts_input, ABS_Y, st->ts_prev_absy);
+ input_report_key(st->ts_input, BTN_TOUCH, 1);
+ input_sync(st->ts_input);
+ } else
+ st->ts_bufferedmeasure = true;
+
+ /* Now make new measurement */
+ st->ts_prev_absx = at91_adc_readl(st, AT91_ADC_CHAN(st, 3))
+ << MAX_RLPOS_BITS;
+ st->ts_prev_absx /= at91_adc_readl(st, AT91_ADC_CHAN(st, 2));
+
+ st->ts_prev_absy = at91_adc_readl(st, AT91_ADC_CHAN(st, 1))
+ << MAX_RLPOS_BITS;
+ st->ts_prev_absy /= at91_adc_readl(st, AT91_ADC_CHAN(st, 0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private)
{
struct iio_dev *idev = private;
struct at91_adc_state *st = iio_priv(idev);
@@ -653,6 +841,8 @@ static int at91_adc_probe_dt_ts(struct device_node *node,
return -EINVAL;
}
+ if (!st->caps->has_tsmr)
+ return 0;
prop = 0;
of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop);
st->ts_pressure_threshold = prop;
@@ -776,6 +966,7 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
st->trigger_number = pdata->trigger_number;
st->trigger_list = pdata->trigger_list;
st->registers = &st->caps->registers;
+ st->touchscreen_type = pdata->touchscreen_type;
return 0;
}
@@ -790,7 +981,10 @@ static int atmel_ts_open(struct input_dev *dev)
{
struct at91_adc_state *st = input_get_drvdata(dev);
- at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+ if (st->caps->has_tsmr)
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+ else
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN);
return 0;
}
@@ -798,45 +992,61 @@ static void atmel_ts_close(struct input_dev *dev)
{
struct at91_adc_state *st = input_get_drvdata(dev);
- at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+ if (st->caps->has_tsmr)
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+ else
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN);
}
static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
{
- u32 reg = 0, pendbc;
+ u32 reg = 0;
int i = 0;
- if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
- reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS;
- else
- reg = AT91_ADC_TSMR_TSMODE_5WIRE;
-
/* a Pen Detect Debounce Time is necessary for the ADC Touch to avoid
* pen detect noise.
* The formula is : Pen Detect Debounce Time = (2 ^ pendbc) / ADCClock
*/
- pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / 1000, 1);
+ st->ts_pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz /
+ 1000, 1);
- while (pendbc >> ++i)
+ while (st->ts_pendbc >> ++i)
; /* Empty! Find the shift offset */
- if (abs(pendbc - (1 << i)) < abs(pendbc - (1 << (i - 1))))
- pendbc = i;
+ if (abs(st->ts_pendbc - (1 << i)) < abs(st->ts_pendbc - (1 << (i - 1))))
+ st->ts_pendbc = i;
else
- pendbc = i - 1;
+ st->ts_pendbc = i - 1;
- if (st->caps->has_tsmr) {
- reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average)
- & AT91_ADC_TSMR_TSAV;
- reg |= AT91_ADC_TSMR_PENDBC_(pendbc) & AT91_ADC_TSMR_PENDBC;
- reg |= AT91_ADC_TSMR_NOTSDMA;
- reg |= AT91_ADC_TSMR_PENDET_ENA;
- reg |= 0x03 << 8; /* TSFREQ, need bigger than TSAV */
-
- at91_adc_writel(st, AT91_ADC_TSMR, reg);
- } else {
- /* TODO: for 9g45 which has no TSMR */
+ if (!st->caps->has_tsmr) {
+ reg = at91_adc_readl(st, AT91_ADC_MR);
+ reg |= AT91_ADC_TSAMOD_TS_ONLY_MODE | AT91_ADC_PENDET;
+
+ reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC;
+ at91_adc_writel(st, AT91_ADC_MR, reg);
+
+ reg = AT91_ADC_TSR_SHTIM_(TOUCH_SHTIM) & AT91_ADC_TSR_SHTIM;
+ at91_adc_writel(st, AT91_ADC_TSR, reg);
+
+ st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US_RL *
+ adc_clk_khz / 1000) - 1, 1);
+
+ return 0;
}
+ if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
+ reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS;
+ else
+ reg = AT91_ADC_TSMR_TSMODE_5WIRE;
+
+ reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average)
+ & AT91_ADC_TSMR_TSAV;
+ reg |= AT91_ADC_TSMR_PENDBC_(st->ts_pendbc) & AT91_ADC_TSMR_PENDBC;
+ reg |= AT91_ADC_TSMR_NOTSDMA;
+ reg |= AT91_ADC_TSMR_PENDET_ENA;
+ reg |= 0x03 << 8; /* TSFREQ, needs to be bigger than TSAV */
+
+ at91_adc_writel(st, AT91_ADC_TSMR, reg);
+
/* Change adc internal resistor value for better pen detection,
* default value is 100 kOhm.
* 0 = 200 kOhm, 1 = 150 kOhm, 2 = 100 kOhm, 3 = 50 kOhm
@@ -845,7 +1055,7 @@ static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
at91_adc_writel(st, AT91_ADC_ACR, st->caps->ts_pen_detect_sensitivity
& AT91_ADC_ACR_PENDETSENS);
- /* Sample Peroid Time = (TRGPER + 1) / ADCClock */
+ /* Sample Period Time = (TRGPER + 1) / ADCClock */
st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US *
adc_clk_khz / 1000) - 1, 1);
@@ -874,18 +1084,38 @@ static int at91_ts_register(struct at91_adc_state *st,
__set_bit(EV_ABS, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, 0, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0);
+ if (st->caps->has_tsmr) {
+ input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0);
+ } else {
+ if (st->touchscreen_type != ATMEL_ADC_TOUCHSCREEN_4WIRE) {
+ dev_err(&pdev->dev,
+ "This touchscreen controller only support 4 wires\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ input_set_abs_params(input, ABS_X, 0, (1 << MAX_RLPOS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, (1 << MAX_RLPOS_BITS) - 1,
+ 0, 0);
+ }
st->ts_input = input;
input_set_drvdata(input, st);
ret = input_register_device(input);
if (ret)
- input_free_device(st->ts_input);
+ goto err;
return ret;
+
+err:
+ input_free_device(st->ts_input);
+ return ret;
}
static void at91_ts_unregister(struct at91_adc_state *st)
@@ -943,11 +1173,13 @@ static int at91_adc_probe(struct platform_device *pdev)
*/
at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST);
at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF);
- ret = request_irq(st->irq,
- at91_adc_interrupt,
- 0,
- pdev->dev.driver->name,
- idev);
+
+ if (st->caps->has_tsmr)
+ ret = request_irq(st->irq, at91_adc_9x5_interrupt, 0,
+ pdev->dev.driver->name, idev);
+ else
+ ret = request_irq(st->irq, at91_adc_rl_interrupt, 0,
+ pdev->dev.driver->name, idev);
if (ret) {
dev_err(&pdev->dev, "Failed to allocate IRQ.\n");
return ret;
@@ -1051,12 +1283,6 @@ static int at91_adc_probe(struct platform_device *pdev)
goto error_disable_adc_clk;
}
} else {
- if (!st->caps->has_tsmr) {
- dev_err(&pdev->dev, "We don't support non-TSMR adc\n");
- ret = -ENODEV;
- goto error_disable_adc_clk;
- }
-
ret = at91_ts_register(st, pdev);
if (ret)
goto error_disable_adc_clk;
@@ -1120,6 +1346,20 @@ static struct at91_adc_caps at91sam9260_caps = {
},
};
+static struct at91_adc_caps at91sam9rl_caps = {
+ .has_ts = true,
+ .calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */
+ .num_channels = 6,
+ .registers = {
+ .channel_base = AT91_ADC_CHR(0),
+ .drdy_mask = AT91_ADC_DRDY,
+ .status_register = AT91_ADC_SR,
+ .trigger_register = AT91_ADC_TRGR_9G45,
+ .mr_prescal_mask = AT91_ADC_PRESCAL_9260,
+ .mr_startup_mask = AT91_ADC_STARTUP_9G45,
+ },
+};
+
static struct at91_adc_caps at91sam9g45_caps = {
.has_ts = true,
.calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */
@@ -1154,6 +1394,7 @@ static struct at91_adc_caps at91sam9x5_caps = {
static const struct of_device_id at91_adc_dt_ids[] = {
{ .compatible = "atmel,at91sam9260-adc", .data = &at91sam9260_caps },
+ { .compatible = "atmel,at91sam9rl-adc", .data = &at91sam9rl_caps },
{ .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps },
{ .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps },
{},
@@ -1165,6 +1406,9 @@ static const struct platform_device_id at91_adc_ids[] = {
.name = "at91sam9260-adc",
.driver_data = (unsigned long)&at91sam9260_caps,
}, {
+ .name = "at91sam9rl-adc",
+ .driver_data = (unsigned long)&at91sam9rl_caps,
+ }, {
.name = "at91sam9g45-adc",
.driver_data = (unsigned long)&at91sam9g45_caps,
}, {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
}
#endif
+#define MLX4_IB_INVALID_MAC ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ int port)
+{
+ u64 new_smac = 0;
+ u64 release_mac = MLX4_IB_INVALID_MAC;
+ struct mlx4_ib_qp *qp;
+
+ read_lock(&dev_base_lock);
+ new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ read_unlock(&dev_base_lock);
+
+ mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+ qp = ibdev->qp1_proxy[port - 1];
+ if (qp) {
+ int new_smac_index;
+ u64 old_smac = qp->pri.smac;
+ struct mlx4_update_qp_params update_params;
+
+ if (new_smac == old_smac)
+ goto unlock;
+
+ new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+ if (new_smac_index < 0)
+ goto unlock;
+
+ update_params.smac_index = new_smac_index;
+ if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ &update_params)) {
+ release_mac = new_smac;
+ goto unlock;
+ }
+
+ qp->pri.smac = new_smac;
+ qp->pri.smac_index = new_smac_index;
+
+ release_mac = old_smac;
+ }
+
+unlock:
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+ if (release_mac != MLX4_IB_INVALID_MAC)
+ mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct mlx4_ib_dev *ibdev, u8 port)
{
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
return 0;
}
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ unsigned long event)
+
{
struct mlx4_ib_iboe *iboe;
+ int update_qps_port = -1;
int port;
iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
curr_master = iboe->masters[port - 1];
+ if (dev == iboe->netdevs[port - 1] &&
+ (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+ event == NETDEV_UP || event == NETDEV_CHANGE))
+ update_qps_port = port;
+
if (curr_netdev) {
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
spin_unlock(&iboe->lock);
+
+ if (update_qps_port > 0)
+ mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, dev, event);
return NOTIFY_DONE;
}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_map;
for (i = 0; i < ibdev->num_ports; ++i) {
+ mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (i = 1 ; i <= ibdev->num_ports ; ++i)
reset_gid_table(ibdev, i);
rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, NULL, 0);
rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
int steer_qpn_count;
int steer_qpn_base;
int steering_support;
+ struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
+ /* lock when destroying qp1_proxy and getting netdev events */
+ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
+ if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ dev->qp1_proxy[mqp->port - 1] = NULL;
+ mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ }
+
pd = get_pd(mqp);
destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
if (err)
return -EINVAL;
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ dev->qp1_proxy[qp->port - 1] = qp;
}
}
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
unsigned short keycodes[MAX_KEYPAD_KEYS];
int rotary_rel_code[2];
+ unsigned int row_shift;
+
/* state row bits of each column scan */
uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
if ((bits_changed & (1 << row)) == 0)
continue;
- code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
goto failed_put_clk;
}
+ keypad->row_shift = get_count_order(pdata->matrix_key_cols);
+
if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
(pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
input_dev->evbit[0] |= BIT_MASK(EV_REL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d68d33fb5ac2..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
}
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+struct min_max_quirk {
+ const char * const *pnp_ids;
+ int x_min, x_max, y_min, y_max;
+};
+
+static const struct min_max_quirk min_max_pnpid_table[] = {
+ {
+ (const char * const []){"LEN0033", NULL},
+ 1024, 5052, 2258, 4832
+ },
+ {
+ (const char * const []){"LEN0035", "LEN0042", NULL},
+ 1232, 5710, 1156, 4696
+ },
+ {
+ (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+ 1024, 5112, 2024, 4832
+ },
+ {
+ (const char * const []){"LEN2001", NULL},
+ 1024, 5022, 2508, 4832
+ },
+ { }
+};
+
/* This list has been kindly provided by Synaptics. */
static const char * const topbuttonpad_pnp_ids[] = {
"LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN002D",
"LEN002E",
"LEN0033", /* Helix */
- "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+ "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
"LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0048",
"LEN0049",
"LEN2000",
- "LEN2001",
+ "LEN2001", /* Edge E431 */
"LEN2002",
"LEN2003",
"LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
NULL
};
+static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+ int i;
+
+ if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+ for (i = 0; ids[i]; i++)
+ if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+ return true;
+
+ return false;
+}
+
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
* Resolution is left zero if touchpad does not support the query
*/
-static const int *quirk_min_max;
-
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ int i;
- if (quirk_min_max) {
- priv->x_min = quirk_min_max[0];
- priv->x_max = quirk_min_max[1];
- priv->y_min = quirk_min_max[2];
- priv->y_max = quirk_min_max[3];
- return 0;
- }
+ for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
+ if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+ priv->x_min = min_max_pnpid_table[i].x_min;
+ priv->x_max = min_max_pnpid_table[i].x_max;
+ priv->y_min = min_max_pnpid_table[i].y_min;
+ priv->y_max = min_max_pnpid_table[i].y_max;
+ return 0;
+ }
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- /* See if this buttonpad has a top button area */
- if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
- for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
- if (strstr(psmouse->ps2dev.serio->firmware_id,
- topbuttonpad_pnp_ids[i])) {
- __set_bit(INPUT_PROP_TOPBUTTONPAD,
- dev->propbit);
- break;
- }
- }
- }
+ if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+ __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
/* Clickpads report only left button */
__clear_bit(BTN_RIGHT, dev->keybit);
__clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI)
- {
- /* Lenovo ThinkPad Helix */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
- },
- .driver_data = (int []){1024, 5052, 2258, 4832},
- },
- {
- /* Lenovo ThinkPad X240 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
- },
- .driver_data = (int []){1232, 5710, 1156, 4696},
- },
- {
- /* Lenovo ThinkPad Edge E431 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
- },
- .driver_data = (int []){1024, 5022, 2508, 4832},
- },
- {
- /* Lenovo ThinkPad T431s */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad T440s */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad L440 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad T540p */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
- },
- .driver_data = (int []){1024, 5056, 2058, 4832},
- },
- {
- /* Lenovo ThinkPad L540 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo Yoga S1 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
- "ThinkPad S1 Yoga"),
- },
- .driver_data = (int []){1232, 5710, 1156, 4696},
- },
- {
- /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION,
- "ThinkPad X1 Carbon 2nd"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
-#endif
- { }
-};
-
void __init synaptics_module_init(void)
{
- const struct dmi_system_id *min_max_dmi;
-
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
-
- min_max_dmi = dmi_first_match(min_max_dmi_table);
- if (min_max_dmi)
- quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
writeb(divisor, KMICLKDIV);
writeb(KMICR_EN, KMICR);
- ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
+ ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
+ kmi);
if (ret) {
printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..d4e5ab57909f 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -550,18 +550,6 @@ config TOUCHSCREEN_TI_AM335X_TSC
To compile this driver as a module, choose M here: the
module will be called ti_am335x_tsc.
-config TOUCHSCREEN_ATMEL_TSADCC
- tristate "Atmel Touchscreen Interface"
- depends on ARCH_AT91
- help
- Say Y here if you have a 4-wire touchscreen connected to the
- ADC Controller on your Atmel SoC.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called atmel_tsadcc.
-
config TOUCHSCREEN_UCB1400
tristate "Philips UCB1400 touchscreen"
depends on AC97_BUS
@@ -640,7 +628,7 @@ config TOUCHSCREEN_WM9713
config TOUCHSCREEN_WM97XX_ATMEL
tristate "WM97xx Atmel accelerated touch"
- depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+ depends on TOUCHSCREEN_WM97XX && AVR32
help
Say Y here for support for streaming mode with WM97xx touchscreens
on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 4bc954b7c7c3..03f12a1f2218 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
-obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
deleted file mode 100644
index a7c9d6967d1e..000000000000
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Atmel Touch Screen Driver
- *
- * Copyright (c) 2008 ATMEL
- * Copyright (c) 2008 Dan Liang
- * Copyright (c) 2008 TimeSys Corporation
- * Copyright (c) 2008 Justin Waters
- *
- * Based on touchscreen code from Atmel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/platform_data/atmel.h>
-#include <mach/cpu.h>
-
-/* Register definitions based on AT91SAM9RL64 preliminary draft datasheet */
-
-#define ATMEL_TSADCC_CR 0x00 /* Control register */
-#define ATMEL_TSADCC_SWRST (1 << 0) /* Software Reset*/
-#define ATMEL_TSADCC_START (1 << 1) /* Start conversion */
-
-#define ATMEL_TSADCC_MR 0x04 /* Mode register */
-#define ATMEL_TSADCC_TSAMOD (3 << 0) /* ADC mode */
-#define ATMEL_TSADCC_TSAMOD_ADC_ONLY_MODE (0x0) /* ADC Mode */
-#define ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE (0x1) /* Touch Screen Only Mode */
-#define ATMEL_TSADCC_LOWRES (1 << 4) /* Resolution selection */
-#define ATMEL_TSADCC_SLEEP (1 << 5) /* Sleep mode */
-#define ATMEL_TSADCC_PENDET (1 << 6) /* Pen Detect selection */
-#define ATMEL_TSADCC_PRES (1 << 7) /* Pressure Measurement Selection */
-#define ATMEL_TSADCC_PRESCAL (0x3f << 8) /* Prescalar Rate Selection */
-#define ATMEL_TSADCC_EPRESCAL (0xff << 8) /* Prescalar Rate Selection (Extended) */
-#define ATMEL_TSADCC_STARTUP (0x7f << 16) /* Start Up time */
-#define ATMEL_TSADCC_SHTIM (0xf << 24) /* Sample & Hold time */
-#define ATMEL_TSADCC_PENDBC (0xf << 28) /* Pen Detect debouncing time */
-
-#define ATMEL_TSADCC_TRGR 0x08 /* Trigger register */
-#define ATMEL_TSADCC_TRGMOD (7 << 0) /* Trigger mode */
-#define ATMEL_TSADCC_TRGMOD_NONE (0 << 0)
-#define ATMEL_TSADCC_TRGMOD_EXT_RISING (1 << 0)
-#define ATMEL_TSADCC_TRGMOD_EXT_FALLING (2 << 0)
-#define ATMEL_TSADCC_TRGMOD_EXT_ANY (3 << 0)
-#define ATMEL_TSADCC_TRGMOD_PENDET (4 << 0)
-#define ATMEL_TSADCC_TRGMOD_PERIOD (5 << 0)
-#define ATMEL_TSADCC_TRGMOD_CONTINUOUS (6 << 0)
-#define ATMEL_TSADCC_TRGPER (0xffff << 16) /* Trigger period */
-
-#define ATMEL_TSADCC_TSR 0x0C /* Touch Screen register */
-#define ATMEL_TSADCC_TSFREQ (0xf << 0) /* TS Frequency in Interleaved mode */
-#define ATMEL_TSADCC_TSSHTIM (0xf << 24) /* Sample & Hold time */
-
-#define ATMEL_TSADCC_CHER 0x10 /* Channel Enable register */
-#define ATMEL_TSADCC_CHDR 0x14 /* Channel Disable register */
-#define ATMEL_TSADCC_CHSR 0x18 /* Channel Status register */
-#define ATMEL_TSADCC_CH(n) (1 << (n)) /* Channel number */
-
-#define ATMEL_TSADCC_SR 0x1C /* Status register */
-#define ATMEL_TSADCC_EOC(n) (1 << ((n)+0)) /* End of conversion for channel N */
-#define ATMEL_TSADCC_OVRE(n) (1 << ((n)+8)) /* Overrun error for channel N */
-#define ATMEL_TSADCC_DRDY (1 << 16) /* Data Ready */
-#define ATMEL_TSADCC_GOVRE (1 << 17) /* General Overrun Error */
-#define ATMEL_TSADCC_ENDRX (1 << 18) /* End of RX Buffer */
-#define ATMEL_TSADCC_RXBUFF (1 << 19) /* TX Buffer full */
-#define ATMEL_TSADCC_PENCNT (1 << 20) /* Pen contact */
-#define ATMEL_TSADCC_NOCNT (1 << 21) /* No contact */
-
-#define ATMEL_TSADCC_LCDR 0x20 /* Last Converted Data register */
-#define ATMEL_TSADCC_DATA (0x3ff << 0) /* Channel data */
-
-#define ATMEL_TSADCC_IER 0x24 /* Interrupt Enable register */
-#define ATMEL_TSADCC_IDR 0x28 /* Interrupt Disable register */
-#define ATMEL_TSADCC_IMR 0x2C /* Interrupt Mask register */
-#define ATMEL_TSADCC_CDR0 0x30 /* Channel Data 0 */
-#define ATMEL_TSADCC_CDR1 0x34 /* Channel Data 1 */
-#define ATMEL_TSADCC_CDR2 0x38 /* Channel Data 2 */
-#define ATMEL_TSADCC_CDR3 0x3C /* Channel Data 3 */
-#define ATMEL_TSADCC_CDR4 0x40 /* Channel Data 4 */
-#define ATMEL_TSADCC_CDR5 0x44 /* Channel Data 5 */
-
-#define ATMEL_TSADCC_XPOS 0x50
-#define ATMEL_TSADCC_Z1DAT 0x54
-#define ATMEL_TSADCC_Z2DAT 0x58
-
-#define PRESCALER_VAL(x) ((x) >> 8)
-
-#define ADC_DEFAULT_CLOCK 100000
-
-struct atmel_tsadcc {
- struct input_dev *input;
- char phys[32];
- struct clk *clk;
- int irq;
- unsigned int prev_absx;
- unsigned int prev_absy;
- unsigned char bufferedmeasure;
-};
-
-static void __iomem *tsc_base;
-
-#define atmel_tsadcc_read(reg) __raw_readl(tsc_base + (reg))
-#define atmel_tsadcc_write(reg, val) __raw_writel((val), tsc_base + (reg))
-
-static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev)
-{
- struct atmel_tsadcc *ts_dev = (struct atmel_tsadcc *)dev;
- struct input_dev *input_dev = ts_dev->input;
-
- unsigned int status;
- unsigned int reg;
-
- status = atmel_tsadcc_read(ATMEL_TSADCC_SR);
- status &= atmel_tsadcc_read(ATMEL_TSADCC_IMR);
-
- if (status & ATMEL_TSADCC_NOCNT) {
- /* Contact lost */
- reg = atmel_tsadcc_read(ATMEL_TSADCC_MR) | ATMEL_TSADCC_PENDBC;
-
- atmel_tsadcc_write(ATMEL_TSADCC_MR, reg);
- atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE);
- atmel_tsadcc_write(ATMEL_TSADCC_IDR,
- ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT);
- atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT);
-
- input_report_key(input_dev, BTN_TOUCH, 0);
- ts_dev->bufferedmeasure = 0;
- input_sync(input_dev);
-
- } else if (status & ATMEL_TSADCC_PENCNT) {
- /* Pen detected */
- reg = atmel_tsadcc_read(ATMEL_TSADCC_MR);
- reg &= ~ATMEL_TSADCC_PENDBC;
-
- atmel_tsadcc_write(ATMEL_TSADCC_IDR, ATMEL_TSADCC_PENCNT);
- atmel_tsadcc_write(ATMEL_TSADCC_MR, reg);
- atmel_tsadcc_write(ATMEL_TSADCC_IER,
- ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT);
- atmel_tsadcc_write(ATMEL_TSADCC_TRGR,
- ATMEL_TSADCC_TRGMOD_PERIOD | (0x0FFF << 16));
-
- } else if (status & ATMEL_TSADCC_EOC(3)) {
- /* Conversion finished */
-
- if (ts_dev->bufferedmeasure) {
- /* Last measurement is always discarded, since it can
- * be erroneous.
- * Always report previous measurement */
- input_report_abs(input_dev, ABS_X, ts_dev->prev_absx);
- input_report_abs(input_dev, ABS_Y, ts_dev->prev_absy);
- input_report_key(input_dev, BTN_TOUCH, 1);
- input_sync(input_dev);
- } else
- ts_dev->bufferedmeasure = 1;
-
- /* Now make new measurement */
- ts_dev->prev_absx = atmel_tsadcc_read(ATMEL_TSADCC_CDR3) << 10;
- ts_dev->prev_absx /= atmel_tsadcc_read(ATMEL_TSADCC_CDR2);
-
- ts_dev->prev_absy = atmel_tsadcc_read(ATMEL_TSADCC_CDR1) << 10;
- ts_dev->prev_absy /= atmel_tsadcc_read(ATMEL_TSADCC_CDR0);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int atmel_tsadcc_probe(struct platform_device *pdev)
-{
- struct atmel_tsadcc *ts_dev;
- struct input_dev *input_dev;
- struct resource *res;
- struct at91_tsadcc_data *pdata = dev_get_platdata(&pdev->dev);
- int err;
- unsigned int prsc;
- unsigned int reg;
-
- if (!pdata)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no mmio resource defined.\n");
- return -ENXIO;
- }
-
- /* Allocate memory for device */
- ts_dev = kzalloc(sizeof(struct atmel_tsadcc), GFP_KERNEL);
- if (!ts_dev) {
- dev_err(&pdev->dev, "failed to allocate memory.\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, ts_dev);
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(&pdev->dev, "failed to allocate input device.\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- ts_dev->irq = platform_get_irq(pdev, 0);
- if (ts_dev->irq < 0) {
- dev_err(&pdev->dev, "no irq ID is designated.\n");
- err = -ENODEV;
- goto err_free_dev;
- }
-
- if (!request_mem_region(res->start, resource_size(res),
- "atmel tsadcc regs")) {
- dev_err(&pdev->dev, "resources is unavailable.\n");
- err = -EBUSY;
- goto err_free_dev;
- }
-
- tsc_base = ioremap(res->start, resource_size(res));
- if (!tsc_base) {
- dev_err(&pdev->dev, "failed to map registers.\n");
- err = -ENOMEM;
- goto err_release_mem;
- }
-
- err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, 0,
- pdev->dev.driver->name, ts_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to allocate irq.\n");
- goto err_unmap_regs;
- }
-
- ts_dev->clk = clk_get(&pdev->dev, "tsc_clk");
- if (IS_ERR(ts_dev->clk)) {
- dev_err(&pdev->dev, "failed to get ts_clk\n");
- err = PTR_ERR(ts_dev->clk);
- goto err_free_irq;
- }
-
- ts_dev->input = input_dev;
- ts_dev->bufferedmeasure = 0;
-
- snprintf(ts_dev->phys, sizeof(ts_dev->phys),
- "%s/input0", dev_name(&pdev->dev));
-
- input_dev->name = "atmel touch screen controller";
- input_dev->phys = ts_dev->phys;
- input_dev->dev.parent = &pdev->dev;
-
- __set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_X, 0, 0x3FF, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 0x3FF, 0, 0);
-
- input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
-
- /* clk_enable() always returns 0, no need to check it */
- clk_enable(ts_dev->clk);
-
- prsc = clk_get_rate(ts_dev->clk);
- dev_info(&pdev->dev, "Master clock is set at: %d Hz\n", prsc);
-
- if (!pdata->adc_clock)
- pdata->adc_clock = ADC_DEFAULT_CLOCK;
-
- prsc = (prsc / (2 * pdata->adc_clock)) - 1;
-
- /* saturate if this value is too high */
- if (cpu_is_at91sam9rl()) {
- if (prsc > PRESCALER_VAL(ATMEL_TSADCC_PRESCAL))
- prsc = PRESCALER_VAL(ATMEL_TSADCC_PRESCAL);
- } else {
- if (prsc > PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL))
- prsc = PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL);
- }
-
- dev_info(&pdev->dev, "Prescaler is set at: %d\n", prsc);
-
- reg = ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE |
- ((0x00 << 5) & ATMEL_TSADCC_SLEEP) | /* Normal Mode */
- ((0x01 << 6) & ATMEL_TSADCC_PENDET) | /* Enable Pen Detect */
- (prsc << 8) |
- ((0x26 << 16) & ATMEL_TSADCC_STARTUP) |
- ((pdata->pendet_debounce << 28) & ATMEL_TSADCC_PENDBC);
-
- atmel_tsadcc_write(ATMEL_TSADCC_CR, ATMEL_TSADCC_SWRST);
- atmel_tsadcc_write(ATMEL_TSADCC_MR, reg);
- atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE);
- atmel_tsadcc_write(ATMEL_TSADCC_TSR,
- (pdata->ts_sample_hold_time << 24) & ATMEL_TSADCC_TSSHTIM);
-
- atmel_tsadcc_read(ATMEL_TSADCC_SR);
- atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT);
-
- /* All went ok, so register to the input system */
- err = input_register_device(input_dev);
- if (err)
- goto err_fail;
-
- return 0;
-
-err_fail:
- clk_disable(ts_dev->clk);
- clk_put(ts_dev->clk);
-err_free_irq:
- free_irq(ts_dev->irq, ts_dev);
-err_unmap_regs:
- iounmap(tsc_base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_dev:
- input_free_device(input_dev);
-err_free_mem:
- kfree(ts_dev);
- return err;
-}
-
-static int atmel_tsadcc_remove(struct platform_device *pdev)
-{
- struct atmel_tsadcc *ts_dev = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(ts_dev->irq, ts_dev);
-
- input_unregister_device(ts_dev->input);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(tsc_base);
- release_mem_region(res->start, resource_size(res));
-
- clk_disable(ts_dev->clk);
- clk_put(ts_dev->clk);
-
- kfree(ts_dev);
-
- return 0;
-}
-
-static struct platform_driver atmel_tsadcc_driver = {
- .probe = atmel_tsadcc_probe,
- .remove = atmel_tsadcc_remove,
- .driver = {
- .name = "atmel_tsadcc",
- },
-};
-module_platform_driver(atmel_tsadcc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Atmel TouchScreen Driver");
-MODULE_AUTHOR("Dan Liang <dan.liang@atmel.com>");
-
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b97a6ab95a9d..685f9263cfee 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1006,13 +1006,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
}
static struct iommu_ops exynos_iommu_ops = {
- .domain_init = &exynos_iommu_domain_init,
- .domain_destroy = &exynos_iommu_domain_destroy,
- .attach_dev = &exynos_iommu_attach_device,
- .detach_dev = &exynos_iommu_detach_device,
- .map = &exynos_iommu_map,
- .unmap = &exynos_iommu_unmap,
- .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .domain_init = exynos_iommu_domain_init,
+ .domain_destroy = exynos_iommu_domain_destroy,
+ .attach_dev = exynos_iommu_attach_device,
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3899ba7821c5..c887e6eebc41 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -310,7 +311,8 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
}
#ifdef CONFIG_SMP
-void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
+static void armada_mpic_send_doorbell(const struct cpumask *mask,
+ unsigned int irq)
{
int cpu;
unsigned long map = 0;
@@ -330,7 +332,7 @@ void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
-void armada_xp_mpic_smp_cpu_init(void)
+static void armada_xp_mpic_smp_cpu_init(void)
{
/* Clear pending IPIs */
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
@@ -342,6 +344,20 @@ void armada_xp_mpic_smp_cpu_init(void)
/* Unmask IPI interrupt */
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
+
+static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ armada_xp_mpic_smp_cpu_init();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
+ .notifier_call = armada_xp_mpic_secondary_init,
+ .priority = 100,
+};
+
#endif /* CONFIG_SMP */
static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -497,6 +513,10 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
if (parent_irq <= 0) {
irq_set_default_host(armada_370_xp_mpic_domain);
set_handle_irq(armada_370_xp_handle_irq);
+#ifdef CONFIG_SMP
+ set_smp_cross_call(armada_mpic_send_doorbell);
+ register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+#endif
} else {
irq_set_chained_handler(parent_irq,
armada_370_xp_mpic_handle_cascade_irq);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e25f246cd2fb..34d18b48bb78 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -42,7 +42,7 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs)
u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
gc->mask_cache;
while (stat) {
- u32 hwirq = ffs(stat) - 1;
+ u32 hwirq = __fls(stat);
u32 irq = irq_find_mapping(orion_irq_domain,
gc->irq_base + hwirq);
handle_IRQ(irq, regs);
@@ -117,7 +117,7 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
gc->mask_cache;
while (stat) {
- u32 hwirq = ffs(stat) - 1;
+ u32 hwirq = __fls(stat);
generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
stat &= ~(1 << hwirq);
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 581eefe331ae..5e54f6d71e77 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -58,7 +58,8 @@ static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
handle_IRQ(irqnr, regs);
}
-static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *parent)
+static int __init sirfsoc_irq_init(struct device_node *np,
+ struct device_node *parent)
{
void __iomem *base = of_iomap(np, 0);
if (!base)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6de9dfbf61c1..39e717797cc0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -487,6 +487,14 @@ config LEDS_BLINKM
This option enables support for the BlinkM RGB LED connected
through I2C. Say Y to enable support for the BlinkM LED.
+config LEDS_VERSATILE
+ tristate "LED support for the ARM Versatile and RealView"
+ depends on ARCH_REALVIEW || ARCH_VERSATILE
+ depends on LEDS_CLASS
+ help
+ This option enabled support for the LEDs on the ARM Versatile
+ and RealView boards. Say Y to enabled these.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3cd76dbd9be2..8b4c956e11ba 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-versatile.c b/drivers/leds/leds-versatile.c
new file mode 100644
index 000000000000..80553022d661
--- /dev/null
+++ b/drivers/leds/leds-versatile.c
@@ -0,0 +1,110 @@
+/*
+ * Driver for the 8 user LEDs found on the RealViews and Versatiles
+ * Based on DaVinci's DM365 board code
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Linus Walleij <triad@df.lth.se>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+struct versatile_led {
+ void __iomem *base;
+ struct led_classdev cdev;
+ u8 mask;
+};
+
+/*
+ * The triggers lines up below will only be used if the
+ * LED triggers are compiled in.
+ */
+static const struct {
+ const char *name;
+ const char *trigger;
+} versatile_leds[] = {
+ { "versatile:0", "heartbeat", },
+ { "versatile:1", "mmc0", },
+ { "versatile:2", "cpu0" },
+ { "versatile:3", "cpu1" },
+ { "versatile:4", "cpu2" },
+ { "versatile:5", "cpu3" },
+ { "versatile:6", },
+ { "versatile:7", },
+};
+
+static void versatile_led_set(struct led_classdev *cdev,
+ enum led_brightness b)
+{
+ struct versatile_led *led = container_of(cdev,
+ struct versatile_led, cdev);
+ u32 reg = readl(led->base);
+
+ if (b != LED_OFF)
+ reg |= led->mask;
+ else
+ reg &= ~led->mask;
+ writel(reg, led->base);
+}
+
+static enum led_brightness versatile_led_get(struct led_classdev *cdev)
+{
+ struct versatile_led *led = container_of(cdev,
+ struct versatile_led, cdev);
+ u32 reg = readl(led->base);
+
+ return (reg & led->mask) ? LED_FULL : LED_OFF;
+}
+
+static int versatile_leds_probe(struct platform_device *dev)
+{
+ int i;
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* All off */
+ writel(0, base);
+ for (i = 0; i < ARRAY_SIZE(versatile_leds); i++) {
+ struct versatile_led *led;
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (!led)
+ break;
+
+ led->base = base;
+ led->cdev.name = versatile_leds[i].name;
+ led->cdev.brightness_set = versatile_led_set;
+ led->cdev.brightness_get = versatile_led_get;
+ led->cdev.default_trigger = versatile_leds[i].trigger;
+ led->mask = BIT(i);
+
+ if (led_classdev_register(NULL, &led->cdev) < 0) {
+ kfree(led);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver versatile_leds_driver = {
+ .driver = {
+ .name = "versatile-leds",
+ },
+ .probe = versatile_leds_probe,
+};
+
+module_platform_driver(versatile_leds_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("ARM Versatile LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index bbe12932d404..9018ab83517a 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -183,14 +183,14 @@ EXPORT_SYMBOL_GPL(mcb_device_register);
*
* Allocate a new @mcb_bus.
*/
-struct mcb_bus *mcb_alloc_bus(void)
+struct mcb_bus *mcb_alloc_bus(struct device *carrier)
{
struct mcb_bus *bus;
int bus_nr;
bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
if (!bus)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
if (bus_nr < 0) {
@@ -200,7 +200,7 @@ struct mcb_bus *mcb_alloc_bus(void)
INIT_LIST_HEAD(&bus->children);
bus->bus_nr = bus_nr;
-
+ bus->carrier = carrier;
return bus;
}
EXPORT_SYMBOL_GPL(mcb_alloc_bus);
@@ -378,6 +378,13 @@ void mcb_release_mem(struct resource *mem)
}
EXPORT_SYMBOL_GPL(mcb_release_mem);
+static int __mcb_get_irq(struct mcb_device *dev)
+{
+ struct resource *irq = &dev->irq;
+
+ return irq->start;
+}
+
/**
* mcb_get_irq() - Get device's IRQ number
* @dev: The @mcb_device the IRQ is for
@@ -386,9 +393,12 @@ EXPORT_SYMBOL_GPL(mcb_release_mem);
*/
int mcb_get_irq(struct mcb_device *dev)
{
- struct resource *irq = &dev->irq;
+ struct mcb_bus *bus = dev->bus;
- return irq->start;
+ if (bus->get_irq)
+ return bus->get_irq(dev);
+
+ return __mcb_get_irq(dev);
}
EXPORT_SYMBOL_GPL(mcb_get_irq);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 99c742cbfb5b..b59181965643 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -20,6 +20,15 @@ struct priv {
void __iomem *base;
};
+static int mcb_pci_get_irq(struct mcb_device *mdev)
+{
+ struct mcb_bus *mbus = mdev->bus;
+ struct device *dev = mbus->carrier;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return pdev->irq;
+}
+
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct priv *priv;
@@ -67,7 +76,13 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, priv);
- priv->bus = mcb_alloc_bus();
+ priv->bus = mcb_alloc_bus(&pdev->dev);
+ if (IS_ERR(priv->bus)) {
+ ret = PTR_ERR(priv->bus);
+ goto err_drvdata;
+ }
+
+ priv->bus->get_irq = mcb_pci_get_irq;
ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
if (ret < 0)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->num_discard_bios = 1;
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
+ /* Discard bios must be split on a block boundary */
+ ti->split_discard_bios = true;
cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a41..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
else
m->saved_queue_if_no_path = queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
- if (!m->queue_if_no_path)
- dm_table_run_md_queue_async(m->ti->table);
-
spin_unlock_irqrestore(&m->lock, flags);
+ if (!queue_if_no_path)
+ dm_table_run_md_queue_async(m->ti->table);
+
return 0;
}
@@ -954,7 +954,7 @@ out:
*/
static int reinstate_path(struct pgpath *pgpath)
{
- int r = 0;
+ int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
- dm_table_run_md_queue_async(m->ti->table);
+ run_queue = 1;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
out:
spin_unlock_irqrestore(&m->lock, flags);
+ if (run_queue)
+ dm_table_run_md_queue_async(m->ti->table);
return r;
}
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
}
if (m->pg_init_required)
__pg_init_all_paths(m);
- dm_table_run_md_queue_async(m->ti->table);
spin_unlock_irqrestore(&m->lock, flags);
+ dm_table_run_md_queue_async(m->ti->table);
}
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e0048..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
+ unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_prepared_mapping = process_prepared_mapping;
pool->process_prepared_discard = process_prepared_discard_passdown;
- if (!pool->pf.error_if_no_space)
- queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+ if (!pool->pf.error_if_no_space && no_space_timeout)
+ queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
break;
case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
module_init(dm_thin_init);
module_exit(dm_thin_exit);
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 455e64916498..6a71bc7c9133 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq,
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
- clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index b59a17fb7c3e..ff7138fd66d1 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -2,7 +2,7 @@
* Marvell EBU SoC Device Bus Controller
* (memory controller for NOR/NAND/SRAM/FPGA devices)
*
- * Copyright (C) 2013 Marvell
+ * Copyright (C) 2013-2014 Marvell
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,19 +30,47 @@
#include <linux/platform_device.h>
/* Register definitions */
-#define DEV_WIDTH_BIT 30
-#define BADR_SKEW_BIT 28
-#define RD_HOLD_BIT 23
-#define ACC_NEXT_BIT 17
-#define RD_SETUP_BIT 12
-#define ACC_FIRST_BIT 6
-
-#define SYNC_ENABLE_BIT 24
-#define WR_HIGH_BIT 16
-#define WR_LOW_BIT 8
-
-#define READ_PARAM_OFFSET 0x0
-#define WRITE_PARAM_OFFSET 0x4
+#define ARMADA_DEV_WIDTH_SHIFT 30
+#define ARMADA_BADR_SKEW_SHIFT 28
+#define ARMADA_RD_HOLD_SHIFT 23
+#define ARMADA_ACC_NEXT_SHIFT 17
+#define ARMADA_RD_SETUP_SHIFT 12
+#define ARMADA_ACC_FIRST_SHIFT 6
+
+#define ARMADA_SYNC_ENABLE_SHIFT 24
+#define ARMADA_WR_HIGH_SHIFT 16
+#define ARMADA_WR_LOW_SHIFT 8
+
+#define ARMADA_READ_PARAM_OFFSET 0x0
+#define ARMADA_WRITE_PARAM_OFFSET 0x4
+
+#define ORION_RESERVED (0x2 << 30)
+#define ORION_BADR_SKEW_SHIFT 28
+#define ORION_WR_HIGH_EXT_BIT BIT(27)
+#define ORION_WR_HIGH_EXT_MASK 0x8
+#define ORION_WR_LOW_EXT_BIT BIT(26)
+#define ORION_WR_LOW_EXT_MASK 0x8
+#define ORION_ALE_WR_EXT_BIT BIT(25)
+#define ORION_ALE_WR_EXT_MASK 0x8
+#define ORION_ACC_NEXT_EXT_BIT BIT(24)
+#define ORION_ACC_NEXT_EXT_MASK 0x10
+#define ORION_ACC_FIRST_EXT_BIT BIT(23)
+#define ORION_ACC_FIRST_EXT_MASK 0x10
+#define ORION_TURN_OFF_EXT_BIT BIT(22)
+#define ORION_TURN_OFF_EXT_MASK 0x8
+#define ORION_DEV_WIDTH_SHIFT 20
+#define ORION_WR_HIGH_SHIFT 17
+#define ORION_WR_HIGH_MASK 0x7
+#define ORION_WR_LOW_SHIFT 14
+#define ORION_WR_LOW_MASK 0x7
+#define ORION_ALE_WR_SHIFT 11
+#define ORION_ALE_WR_MASK 0x7
+#define ORION_ACC_NEXT_SHIFT 7
+#define ORION_ACC_NEXT_MASK 0xF
+#define ORION_ACC_FIRST_SHIFT 3
+#define ORION_ACC_FIRST_MASK 0xF
+#define ORION_TURN_OFF_SHIFT 0
+#define ORION_TURN_OFF_MASK 0x7
struct devbus_read_params {
u32 bus_width;
@@ -89,19 +117,14 @@ static int get_timing_param_ps(struct devbus *devbus,
return 0;
}
-static int devbus_set_timing_params(struct devbus *devbus,
- struct device_node *node)
+static int devbus_get_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
{
- struct devbus_read_params r;
- struct devbus_write_params w;
- u32 value;
int err;
- dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
- devbus->tick_ps);
-
- /* Get read timings */
- err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width);
+ err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width);
if (err < 0) {
dev_err(devbus->dev,
"%s has no 'devbus,bus-width' property\n",
@@ -113,104 +136,148 @@ static int devbus_set_timing_params(struct devbus *devbus,
* The bus width is encoded into the register as 0 for 8 bits,
* and 1 for 16 bits, so we do the necessary conversion here.
*/
- if (r.bus_width == 8)
- r.bus_width = 0;
- else if (r.bus_width == 16)
- r.bus_width = 1;
+ if (r->bus_width == 8)
+ r->bus_width = 0;
+ else if (r->bus_width == 16)
+ r->bus_width = 1;
else {
- dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+ dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
return -EINVAL;
}
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
- &r.badr_skew);
+ &r->badr_skew);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
- &r.turn_off);
+ &r->turn_off);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
- &r.acc_first);
+ &r->acc_first);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
- &r.acc_next);
- if (err < 0)
- return err;
-
- err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
- &r.rd_setup);
+ &r->acc_next);
if (err < 0)
return err;
- err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
- &r.rd_hold);
- if (err < 0)
- return err;
-
- /* Get write timings */
- err = of_property_read_u32(node, "devbus,sync-enable",
- &w.sync_enable);
- if (err < 0) {
- dev_err(devbus->dev,
- "%s has no 'devbus,sync-enable' property\n",
- node->full_name);
- return err;
+ if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) {
+ err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
+ &r->rd_setup);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
+ &r->rd_hold);
+ if (err < 0)
+ return err;
+
+ err = of_property_read_u32(node, "devbus,sync-enable",
+ &w->sync_enable);
+ if (err < 0) {
+ dev_err(devbus->dev,
+ "%s has no 'devbus,sync-enable' property\n",
+ node->full_name);
+ return err;
+ }
}
err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
- &w.ale_wr);
+ &w->ale_wr);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
- &w.wr_low);
+ &w->wr_low);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
- &w.wr_high);
+ &w->wr_high);
if (err < 0)
return err;
+ return 0;
+}
+
+static void devbus_orion_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
+ /*
+ * The hardware designers found it would be a good idea to
+ * split most of the values in the register into two fields:
+ * one containing all the low-order bits, and another one
+ * containing just the high-order bit. For all of those
+ * fields, we have to split the value into these two parts.
+ */
+ value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT |
+ (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT |
+ (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT |
+ (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT |
+ (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT |
+ (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT |
+ r->bus_width << ORION_DEV_WIDTH_SHIFT |
+ ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) |
+ ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? ORION_ACC_FIRST_EXT_BIT : 0) |
+ ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) |
+ ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) |
+ ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) |
+ ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) |
+ (r->badr_skew << ORION_BADR_SKEW_SHIFT) |
+ ORION_RESERVED;
+
+ writel(value, devbus->base);
+}
+
+static void devbus_armada_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
/* Set read timings */
- value = r.bus_width << DEV_WIDTH_BIT |
- r.badr_skew << BADR_SKEW_BIT |
- r.rd_hold << RD_HOLD_BIT |
- r.acc_next << ACC_NEXT_BIT |
- r.rd_setup << RD_SETUP_BIT |
- r.acc_first << ACC_FIRST_BIT |
- r.turn_off;
+ value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT |
+ r->badr_skew << ARMADA_BADR_SKEW_SHIFT |
+ r->rd_hold << ARMADA_RD_HOLD_SHIFT |
+ r->acc_next << ARMADA_ACC_NEXT_SHIFT |
+ r->rd_setup << ARMADA_RD_SETUP_SHIFT |
+ r->acc_first << ARMADA_ACC_FIRST_SHIFT |
+ r->turn_off;
dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n",
- devbus->base + READ_PARAM_OFFSET,
+ devbus->base + ARMADA_READ_PARAM_OFFSET,
value);
- writel(value, devbus->base + READ_PARAM_OFFSET);
+ writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET);
/* Set write timings */
- value = w.sync_enable << SYNC_ENABLE_BIT |
- w.wr_low << WR_LOW_BIT |
- w.wr_high << WR_HIGH_BIT |
- w.ale_wr;
+ value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT |
+ w->wr_low << ARMADA_WR_LOW_SHIFT |
+ w->wr_high << ARMADA_WR_HIGH_SHIFT |
+ w->ale_wr;
dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n",
- devbus->base + WRITE_PARAM_OFFSET,
+ devbus->base + ARMADA_WRITE_PARAM_OFFSET,
value);
- writel(value, devbus->base + WRITE_PARAM_OFFSET);
-
- return 0;
+ writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET);
}
static int mvebu_devbus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
+ struct devbus_read_params r;
+ struct devbus_write_params w;
struct devbus *devbus;
struct resource *res;
struct clk *clk;
@@ -240,10 +307,21 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
rate = clk_get_rate(clk) / 1000;
devbus->tick_ps = 1000000000 / rate;
- /* Read the device tree node and set the new timing parameters */
- err = devbus_set_timing_params(devbus, node);
- if (err < 0)
- return err;
+ dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
+ devbus->tick_ps);
+
+ if (!of_property_read_bool(node, "devbus,keep-config")) {
+ /* Read the Device Tree node */
+ err = devbus_get_timing_params(devbus, node, &r, &w);
+ if (err < 0)
+ return err;
+
+ /* Set the new timing parameters */
+ if (of_device_is_compatible(node, "marvell,orion-devbus"))
+ devbus_orion_set_timing_params(devbus, node, &r, &w);
+ else
+ devbus_armada_set_timing_params(devbus, node, &r, &w);
+ }
/*
* We need to create a child device explicitly from here to
@@ -259,6 +337,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
static const struct of_device_id mvebu_devbus_of_match[] = {
{ .compatible = "marvell,mvebu-devbus" },
+ { .compatible = "marvell,orion-devbus" },
{},
};
MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 33834120d057..6deb8a11c12f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -331,15 +331,15 @@ config MFD_88PM860X
battery-charger under the corresponding menus.
config MFD_MAX14577
- bool "Maxim Semiconductor MAX14577 MUIC + Charger Support"
+ bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
select IRQ_DOMAIN
help
- Say yes here to add support for Maxim Semiconductor MAX14577.
- This is a Micro-USB IC with Charger controls on chip.
+ Say yes here to add support for Maxim Semiconductor MAX14577 and
+ MAX77836 Micro-USB ICs with battery charger.
This driver provides common support for accessing the device;
additional drivers must be enabled in order to use the functionality
of the device.
@@ -1227,12 +1227,17 @@ config MCP_UCB1200_TS
endmenu
-config VEXPRESS_CONFIG
- bool "ARM Versatile Express platform infrastructure"
- depends on ARM || ARM64
+config MFD_VEXPRESS_SYSREG
+ bool "Versatile Express System Registers"
+ depends on VEXPRESS_CONFIG && GPIOLIB
+ default y
+ select CLKSRC_MMIO
+ select GPIO_GENERIC_PLATFORM
+ select MFD_CORE
+ select MFD_SYSCON
help
- Platform configuration infrastructure for the ARM Ltd.
- Versatile Express.
+ System Registers are the platform configuration block
+ on the ARM Ltd. Versatile Express board.
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 2851275e2656..cec3487b539e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
-obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
+obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o
obj-$(CONFIG_MFD_RETU) += retu-mfd.o
obj-$(CONFIG_MFD_AS3711) += as3711.o
obj-$(CONFIG_MFD_AS3722) += as3722.o
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 5f13cefe8def..484d372a4892 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -1,7 +1,7 @@
/*
- * max14577.c - mfd core driver for the Maxim 14577
+ * max14577.c - mfd core driver for the Maxim 14577/77836
*
- * Copyright (C) 2013 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electrnoics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <k.kozlowski@samsung.com>
*
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max14577.h>
#include <linux/mfd/max14577-private.h>
@@ -37,7 +38,38 @@ static struct mfd_cell max14577_devs[] = {
{ .name = "max14577-charger", },
};
-static bool max14577_volatile_reg(struct device *dev, unsigned int reg)
+static struct mfd_cell max77836_devs[] = {
+ {
+ .name = "max77836-muic",
+ .of_compatible = "maxim,max77836-muic",
+ },
+ {
+ .name = "max77836-regulator",
+ .of_compatible = "maxim,max77836-regulator",
+ },
+ {
+ .name = "max77836-charger",
+ .of_compatible = "maxim,max77836-charger",
+ },
+ {
+ .name = "max77836-battery",
+ .of_compatible = "maxim,max77836-battery",
+ },
+};
+
+static struct of_device_id max14577_dt_match[] = {
+ {
+ .compatible = "maxim,max14577",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX14577,
+ },
+ {
+ .compatible = "maxim,max77836",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX77836,
+ },
+ {},
+};
+
+static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX14577_REG_INT1 ... MAX14577_REG_STATUS3:
@@ -48,49 +80,221 @@ static bool max14577_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
-static const struct regmap_config max14577_regmap_config = {
+static bool max77836_muic_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* Any max14577 volatile registers are also max77836 volatile. */
+ if (max14577_muic_volatile_reg(dev, reg))
+ return true;
+
+ switch (reg) {
+ case MAX77836_FG_REG_VCELL_MSB ... MAX77836_FG_REG_SOC_LSB:
+ case MAX77836_FG_REG_CRATE_MSB ... MAX77836_FG_REG_CRATE_LSB:
+ case MAX77836_FG_REG_STATUS_H ... MAX77836_FG_REG_STATUS_L:
+ case MAX77836_PMIC_REG_INTSRC:
+ case MAX77836_PMIC_REG_TOPSYS_INT:
+ case MAX77836_PMIC_REG_TOPSYS_STAT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const struct regmap_config max14577_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_reg = max14577_volatile_reg,
+ .volatile_reg = max14577_muic_volatile_reg,
.max_register = MAX14577_REG_END,
};
+static const struct regmap_config max77836_pmic_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = max77836_muic_volatile_reg,
+ .max_register = MAX77836_PMIC_REG_END,
+};
+
static const struct regmap_irq max14577_irqs[] = {
/* INT1 interrupts */
- { .reg_offset = 0, .mask = INT1_ADC_MASK, },
- { .reg_offset = 0, .mask = INT1_ADCLOW_MASK, },
- { .reg_offset = 0, .mask = INT1_ADCERR_MASK, },
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, },
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, },
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, },
/* INT2 interrupts */
- { .reg_offset = 1, .mask = INT2_CHGTYP_MASK, },
- { .reg_offset = 1, .mask = INT2_CHGDETRUN_MASK, },
- { .reg_offset = 1, .mask = INT2_DCDTMR_MASK, },
- { .reg_offset = 1, .mask = INT2_DBCHG_MASK, },
- { .reg_offset = 1, .mask = INT2_VBVOLT_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, },
/* INT3 interrupts */
- { .reg_offset = 2, .mask = INT3_EOC_MASK, },
- { .reg_offset = 2, .mask = INT3_CGMBC_MASK, },
- { .reg_offset = 2, .mask = INT3_OVP_MASK, },
- { .reg_offset = 2, .mask = INT3_MBCCHGERR_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, },
};
static const struct regmap_irq_chip max14577_irq_chip = {
.name = "max14577",
.status_base = MAX14577_REG_INT1,
.mask_base = MAX14577_REG_INTMASK1,
- .mask_invert = 1,
+ .mask_invert = true,
.num_regs = 3,
.irqs = max14577_irqs,
.num_irqs = ARRAY_SIZE(max14577_irqs),
};
+static const struct regmap_irq max77836_muic_irqs[] = {
+ /* INT1 interrupts */
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, },
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, },
+ { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, },
+ { .reg_offset = 0, .mask = MAX77836_INT1_ADC1K_MASK, },
+ /* INT2 interrupts */
+ { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, },
+ { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, },
+ { .reg_offset = 1, .mask = MAX77836_INT2_VIDRM_MASK, },
+ /* INT3 interrupts */
+ { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, },
+ { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, },
+};
+
+static const struct regmap_irq_chip max77836_muic_irq_chip = {
+ .name = "max77836-muic",
+ .status_base = MAX14577_REG_INT1,
+ .mask_base = MAX14577_REG_INTMASK1,
+ .mask_invert = true,
+ .num_regs = 3,
+ .irqs = max77836_muic_irqs,
+ .num_irqs = ARRAY_SIZE(max77836_muic_irqs),
+};
+
+static const struct regmap_irq max77836_pmic_irqs[] = {
+ { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T120C_MASK, },
+ { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T140C_MASK, },
+};
+
+static const struct regmap_irq_chip max77836_pmic_irq_chip = {
+ .name = "max77836-pmic",
+ .status_base = MAX77836_PMIC_REG_TOPSYS_INT,
+ .mask_base = MAX77836_PMIC_REG_TOPSYS_INT_MASK,
+ .mask_invert = false,
+ .num_regs = 1,
+ .irqs = max77836_pmic_irqs,
+ .num_irqs = ARRAY_SIZE(max77836_pmic_irqs),
+};
+
+static void max14577_print_dev_type(struct max14577 *max14577)
+{
+ u8 reg_data, vendor_id, device_id;
+ int ret;
+
+ ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID,
+ &reg_data);
+ if (ret) {
+ dev_err(max14577->dev,
+ "Failed to read DEVICEID register: %d\n", ret);
+ return;
+ }
+
+ vendor_id = ((reg_data & DEVID_VENDORID_MASK) >>
+ DEVID_VENDORID_SHIFT);
+ device_id = ((reg_data & DEVID_DEVICEID_MASK) >>
+ DEVID_DEVICEID_SHIFT);
+
+ dev_info(max14577->dev, "Device type: %u (ID: 0x%x, vendor: 0x%x)\n",
+ max14577->dev_type, device_id, vendor_id);
+}
+
+/*
+ * Max77836 specific initialization code for driver probe.
+ * Adds new I2C dummy device, regmap and regmap IRQ chip.
+ * Unmasks Interrupt Source register.
+ *
+ * On success returns 0.
+ * On failure returns errno and reverts any changes done so far (e.g. remove
+ * I2C dummy device), except masking the INT SRC register.
+ */
+static int max77836_init(struct max14577 *max14577)
+{
+ int ret;
+ u8 intsrc_mask;
+
+ max14577->i2c_pmic = i2c_new_dummy(max14577->i2c->adapter,
+ I2C_ADDR_PMIC);
+ if (!max14577->i2c_pmic) {
+ dev_err(max14577->dev, "Failed to register PMIC I2C device\n");
+ return -ENODEV;
+ }
+ i2c_set_clientdata(max14577->i2c_pmic, max14577);
+
+ max14577->regmap_pmic = devm_regmap_init_i2c(max14577->i2c_pmic,
+ &max77836_pmic_regmap_config);
+ if (IS_ERR(max14577->regmap_pmic)) {
+ ret = PTR_ERR(max14577->regmap_pmic);
+ dev_err(max14577->dev, "Failed to allocate PMIC register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* Un-mask MAX77836 Interrupt Source register */
+ ret = max14577_read_reg(max14577->regmap_pmic,
+ MAX77836_PMIC_REG_INTSRC_MASK, &intsrc_mask);
+ if (ret < 0) {
+ dev_err(max14577->dev, "Failed to read PMIC register\n");
+ goto err;
+ }
+
+ intsrc_mask &= ~(MAX77836_INTSRC_MASK_TOP_INT_MASK);
+ intsrc_mask &= ~(MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK);
+ ret = max14577_write_reg(max14577->regmap_pmic,
+ MAX77836_PMIC_REG_INTSRC_MASK, intsrc_mask);
+ if (ret < 0) {
+ dev_err(max14577->dev, "Failed to write PMIC register\n");
+ goto err;
+ }
+
+ ret = regmap_add_irq_chip(max14577->regmap_pmic, max14577->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED,
+ 0, &max77836_pmic_irq_chip,
+ &max14577->irq_data_pmic);
+ if (ret != 0) {
+ dev_err(max14577->dev, "Failed to request PMIC IRQ %d: %d\n",
+ max14577->irq, ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ i2c_unregister_device(max14577->i2c_pmic);
+
+ return ret;
+}
+
+/*
+ * Max77836 specific de-initialization code for driver remove.
+ */
+static void max77836_remove(struct max14577 *max14577)
+{
+ regmap_del_irq_chip(max14577->irq, max14577->irq_data_pmic);
+ i2c_unregister_device(max14577->i2c_pmic);
+}
+
static int max14577_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max14577 *max14577;
struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct device_node *np = i2c->dev.of_node;
- u8 reg_data;
int ret = 0;
+ const struct regmap_irq_chip *irq_chip;
+ struct mfd_cell *mfd_devs;
+ unsigned int mfd_devs_size;
+ int irq_flags;
if (np) {
pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL);
@@ -113,7 +317,8 @@ static int max14577_i2c_probe(struct i2c_client *i2c,
max14577->i2c = i2c;
max14577->irq = i2c->irq;
- max14577->regmap = devm_regmap_init_i2c(i2c, &max14577_regmap_config);
+ max14577->regmap = devm_regmap_init_i2c(i2c,
+ &max14577_muic_regmap_config);
if (IS_ERR(max14577->regmap)) {
ret = PTR_ERR(max14577->regmap);
dev_err(max14577->dev, "Failed to allocate register map: %d\n",
@@ -121,23 +326,36 @@ static int max14577_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID,
- &reg_data);
- if (ret) {
- dev_err(max14577->dev, "Device not found on this channel: %d\n",
- ret);
- return ret;
+ if (np) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(max14577_dt_match, &i2c->dev);
+ if (of_id)
+ max14577->dev_type = (unsigned int)of_id->data;
+ } else {
+ max14577->dev_type = id->driver_data;
+ }
+
+ max14577_print_dev_type(max14577);
+
+ switch (max14577->dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ irq_chip = &max77836_muic_irq_chip;
+ mfd_devs = max77836_devs;
+ mfd_devs_size = ARRAY_SIZE(max77836_devs);
+ irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
+ break;
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ irq_chip = &max14577_irq_chip;
+ mfd_devs = max14577_devs;
+ mfd_devs_size = ARRAY_SIZE(max14577_devs);
+ irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ break;
}
- max14577->vendor_id = ((reg_data & DEVID_VENDORID_MASK) >>
- DEVID_VENDORID_SHIFT);
- max14577->device_id = ((reg_data & DEVID_DEVICEID_MASK) >>
- DEVID_DEVICEID_SHIFT);
- dev_info(max14577->dev, "Device ID: 0x%x, vendor: 0x%x\n",
- max14577->device_id, max14577->vendor_id);
ret = regmap_add_irq_chip(max14577->regmap, max14577->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0,
- &max14577_irq_chip,
+ irq_flags, 0, irq_chip,
&max14577->irq_data);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
@@ -145,8 +363,15 @@ static int max14577_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(max14577->dev, -1, max14577_devs,
- ARRAY_SIZE(max14577_devs), NULL, 0,
+ /* Max77836 specific initialization code (additional regmap) */
+ if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) {
+ ret = max77836_init(max14577);
+ if (ret < 0)
+ goto err_max77836;
+ }
+
+ ret = mfd_add_devices(max14577->dev, -1, mfd_devs,
+ mfd_devs_size, NULL, 0,
regmap_irq_get_domain(max14577->irq_data));
if (ret < 0)
goto err_mfd;
@@ -156,6 +381,9 @@ static int max14577_i2c_probe(struct i2c_client *i2c,
return 0;
err_mfd:
+ if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
+ max77836_remove(max14577);
+err_max77836:
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
return ret;
@@ -167,12 +395,15 @@ static int max14577_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(max14577->dev);
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
+ if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
+ max77836_remove(max14577);
return 0;
}
static const struct i2c_device_id max14577_i2c_id[] = {
- { "max14577", 0 },
+ { "max14577", MAXIM_DEVICE_TYPE_MAX14577, },
+ { "max77836", MAXIM_DEVICE_TYPE_MAX77836, },
{ }
};
MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
@@ -215,11 +446,6 @@ static int max14577_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static struct of_device_id max14577_dt_match[] = {
- { .compatible = "maxim,max14577", },
- {},
-};
-
static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume);
static struct i2c_driver max14577_i2c_driver = {
@@ -236,6 +462,9 @@ static struct i2c_driver max14577_i2c_driver = {
static int __init max14577_i2c_init(void)
{
+ BUILD_BUG_ON(ARRAY_SIZE(max14577_i2c_id) != MAXIM_DEVICE_TYPE_NUM);
+ BUILD_BUG_ON(ARRAY_SIZE(max14577_dt_match) != MAXIM_DEVICE_TYPE_NUM);
+
return i2c_add_driver(&max14577_i2c_driver);
}
subsys_initcall(max14577_i2c_init);
@@ -247,5 +476,5 @@ static void __exit max14577_i2c_exit(void)
module_exit(max14577_i2c_exit);
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>");
-MODULE_DESCRIPTION("MAXIM 14577 multi-function core driver");
+MODULE_DESCRIPTION("Maxim 14577/77836 multi-function core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index dbea55de4397..e2a04bb8bc1e 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -119,6 +120,7 @@ static struct regmap_config syscon_regmap_config = {
static int syscon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct syscon_platform_data *pdata = dev_get_platdata(dev);
struct syscon *syscon;
struct resource *res;
void __iomem *base;
@@ -136,6 +138,8 @@ static int syscon_probe(struct platform_device *pdev)
return -ENOMEM;
syscon_regmap_config.max_register = res->end - res->start - 3;
+ if (pdata)
+ syscon_regmap_config.name = pdata->label;
syscon->regmap = devm_regmap_init_mmio(dev, base,
&syscon_regmap_config);
if (IS_ERR(syscon->regmap)) {
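[Editor's note] With the platform-data hook above, a parent driver can give each syscon instance a distinguishable regmap name; the vexpress-sysreg conversion later in this patch relies on exactly that. A hedged sketch of a parent registering such a cell (names and the 0x000/0x4 resource are illustrative, not from the patch):

#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/platform_data/syscon.h>

static struct syscon_platform_data my_id_pdata = {
	.label = "sys_id",	/* picked up as the regmap name in probe */
};

static struct mfd_cell my_syscon_cells[] = {
	{
		.name = "syscon",
		.num_resources = 1,
		.resources = (struct resource []) {
			DEFINE_RES_MEM(0x000, 0x4),	/* offset in parent */
		},
		.platform_data = &my_id_pdata,
		.pdata_size = sizeof(my_id_pdata),
	},
};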
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index e87140bef667..db11b4f40611 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -98,7 +98,11 @@
#define TWL4030_BASEADD_BACKUP 0x0014
#define TWL4030_BASEADD_INT 0x002E
#define TWL4030_BASEADD_PM_MASTER 0x0036
+
#define TWL4030_BASEADD_PM_RECEIVER 0x005B
+#define TWL4030_DCDC_GLOBAL_CFG 0x06
+#define SMARTREFLEX_ENABLE BIT(3)
+
#define TWL4030_BASEADD_RTC 0x001C
#define TWL4030_BASEADD_SECURED_REG 0x0000
@@ -1204,6 +1208,11 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
* Disable TWL4030/TWL5030 I2C Pull-up on I2C1 and I2C4(SR) interface.
* Program I2C_SCL_CTRL_PU(bit 0)=0, I2C_SDA_CTRL_PU (bit 2)=0,
* SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0.
+ *
+ * Also, always enable the SmartReflex bit as that's needed for omaps
+ * to do anything over I2C4 for voltage scaling even if SmartReflex
+ * is disabled. Without the SmartReflex bit the omap sys_clkreq idle
+ * signal will never trigger for retention idle.
*/
if (twl_class_is_4030()) {
u8 temp;
@@ -1212,6 +1221,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
temp &= ~(SR_I2C_SDA_CTRL_PU | SR_I2C_SCL_CTRL_PU | \
I2C_SDA_CTRL_PU | I2C_SCL_CTRL_PU);
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &temp,
+ TWL4030_DCDC_GLOBAL_CFG);
+ temp |= SMARTREFLEX_ENABLE;
+ twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, temp,
+ TWL4030_DCDC_GLOBAL_CFG);
}
if (node) {
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
deleted file mode 100644
index d0db89d13e01..000000000000
--- a/drivers/mfd/vexpress-config.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- */
-
-#define pr_fmt(fmt) "vexpress-config: " fmt
-
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/export.h>
-#include <linux/list.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/vexpress.h>
-
-
-#define VEXPRESS_CONFIG_MAX_BRIDGES 2
-
-static struct vexpress_config_bridge {
- struct device_node *node;
- struct vexpress_config_bridge_info *info;
- struct list_head transactions;
- spinlock_t transactions_lock;
-} vexpress_config_bridges[VEXPRESS_CONFIG_MAX_BRIDGES];
-
-static DECLARE_BITMAP(vexpress_config_bridges_map,
- ARRAY_SIZE(vexpress_config_bridges));
-static DEFINE_MUTEX(vexpress_config_bridges_mutex);
-
-struct vexpress_config_bridge *vexpress_config_bridge_register(
- struct device_node *node,
- struct vexpress_config_bridge_info *info)
-{
- struct vexpress_config_bridge *bridge;
- int i;
-
- pr_debug("Registering bridge '%s'\n", info->name);
-
- mutex_lock(&vexpress_config_bridges_mutex);
- i = find_first_zero_bit(vexpress_config_bridges_map,
- ARRAY_SIZE(vexpress_config_bridges));
- if (i >= ARRAY_SIZE(vexpress_config_bridges)) {
- pr_err("Can't register more bridges!\n");
- mutex_unlock(&vexpress_config_bridges_mutex);
- return NULL;
- }
- __set_bit(i, vexpress_config_bridges_map);
- bridge = &vexpress_config_bridges[i];
-
- bridge->node = node;
- bridge->info = info;
- INIT_LIST_HEAD(&bridge->transactions);
- spin_lock_init(&bridge->transactions_lock);
-
- mutex_unlock(&vexpress_config_bridges_mutex);
-
- return bridge;
-}
-EXPORT_SYMBOL(vexpress_config_bridge_register);
-
-void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
-{
- struct vexpress_config_bridge __bridge = *bridge;
- int i;
-
- mutex_lock(&vexpress_config_bridges_mutex);
- for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++)
- if (&vexpress_config_bridges[i] == bridge)
- __clear_bit(i, vexpress_config_bridges_map);
- mutex_unlock(&vexpress_config_bridges_mutex);
-
- WARN_ON(!list_empty(&__bridge.transactions));
- while (!list_empty(&__bridge.transactions))
- cpu_relax();
-}
-EXPORT_SYMBOL(vexpress_config_bridge_unregister);
-
-
-struct vexpress_config_func {
- struct vexpress_config_bridge *bridge;
- void *func;
-};
-
-struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
- struct device_node *node)
-{
- struct device_node *bridge_node;
- struct vexpress_config_func *func;
- int i;
-
- if (WARN_ON(dev && node && dev->of_node != node))
- return NULL;
- if (dev && !node)
- node = dev->of_node;
-
- func = kzalloc(sizeof(*func), GFP_KERNEL);
- if (!func)
- return NULL;
-
- bridge_node = of_node_get(node);
- while (bridge_node) {
- const __be32 *prop = of_get_property(bridge_node,
- "arm,vexpress,config-bridge", NULL);
-
- if (prop) {
- bridge_node = of_find_node_by_phandle(
- be32_to_cpup(prop));
- break;
- }
-
- bridge_node = of_get_next_parent(bridge_node);
- }
-
- mutex_lock(&vexpress_config_bridges_mutex);
- for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) {
- struct vexpress_config_bridge *bridge =
- &vexpress_config_bridges[i];
-
- if (test_bit(i, vexpress_config_bridges_map) &&
- bridge->node == bridge_node) {
- func->bridge = bridge;
- func->func = bridge->info->func_get(dev, node);
- break;
- }
- }
- mutex_unlock(&vexpress_config_bridges_mutex);
-
- if (!func->func) {
- of_node_put(node);
- kfree(func);
- return NULL;
- }
-
- return func;
-}
-EXPORT_SYMBOL(__vexpress_config_func_get);
-
-void vexpress_config_func_put(struct vexpress_config_func *func)
-{
- func->bridge->info->func_put(func->func);
- of_node_put(func->bridge->node);
- kfree(func);
-}
-EXPORT_SYMBOL(vexpress_config_func_put);
-
-struct vexpress_config_trans {
- struct vexpress_config_func *func;
- int offset;
- bool write;
- u32 *data;
- int status;
- struct completion completion;
- struct list_head list;
-};
-
-static void vexpress_config_dump_trans(const char *what,
- struct vexpress_config_trans *trans)
-{
- pr_debug("%s %s trans %p func 0x%p offset %d data 0x%x status %d\n",
- what, trans->write ? "write" : "read", trans,
- trans->func->func, trans->offset,
- trans->data ? *trans->data : 0, trans->status);
-}
-
-static int vexpress_config_schedule(struct vexpress_config_trans *trans)
-{
- int status;
- struct vexpress_config_bridge *bridge = trans->func->bridge;
- unsigned long flags;
-
- init_completion(&trans->completion);
- trans->status = -EFAULT;
-
- spin_lock_irqsave(&bridge->transactions_lock, flags);
-
- if (list_empty(&bridge->transactions)) {
- vexpress_config_dump_trans("Executing", trans);
- status = bridge->info->func_exec(trans->func->func,
- trans->offset, trans->write, trans->data);
- } else {
- vexpress_config_dump_trans("Queuing", trans);
- status = VEXPRESS_CONFIG_STATUS_WAIT;
- }
-
- switch (status) {
- case VEXPRESS_CONFIG_STATUS_DONE:
- vexpress_config_dump_trans("Finished", trans);
- trans->status = status;
- break;
- case VEXPRESS_CONFIG_STATUS_WAIT:
- list_add_tail(&trans->list, &bridge->transactions);
- break;
- }
-
- spin_unlock_irqrestore(&bridge->transactions_lock, flags);
-
- return status;
-}
-
-void vexpress_config_complete(struct vexpress_config_bridge *bridge,
- int status)
-{
- struct vexpress_config_trans *trans;
- unsigned long flags;
- const char *message = "Completed";
-
- spin_lock_irqsave(&bridge->transactions_lock, flags);
-
- trans = list_first_entry(&bridge->transactions,
- struct vexpress_config_trans, list);
- trans->status = status;
-
- do {
- vexpress_config_dump_trans(message, trans);
- list_del(&trans->list);
- complete(&trans->completion);
-
- if (list_empty(&bridge->transactions))
- break;
-
- trans = list_first_entry(&bridge->transactions,
- struct vexpress_config_trans, list);
- vexpress_config_dump_trans("Executing pending", trans);
- trans->status = bridge->info->func_exec(trans->func->func,
- trans->offset, trans->write, trans->data);
- message = "Finished pending";
- } while (trans->status == VEXPRESS_CONFIG_STATUS_DONE);
-
- spin_unlock_irqrestore(&bridge->transactions_lock, flags);
-}
-EXPORT_SYMBOL(vexpress_config_complete);
-
-int vexpress_config_wait(struct vexpress_config_trans *trans)
-{
- wait_for_completion(&trans->completion);
-
- return trans->status;
-}
-EXPORT_SYMBOL(vexpress_config_wait);
-
-int vexpress_config_read(struct vexpress_config_func *func, int offset,
- u32 *data)
-{
- struct vexpress_config_trans trans = {
- .func = func,
- .offset = offset,
- .write = false,
- .data = data,
- .status = 0,
- };
- int status = vexpress_config_schedule(&trans);
-
- if (status == VEXPRESS_CONFIG_STATUS_WAIT)
- status = vexpress_config_wait(&trans);
-
- return status;
-}
-EXPORT_SYMBOL(vexpress_config_read);
-
-int vexpress_config_write(struct vexpress_config_func *func, int offset,
- u32 data)
-{
- struct vexpress_config_trans trans = {
- .func = func,
- .offset = offset,
- .write = true,
- .data = &data,
- .status = 0,
- };
- int status = vexpress_config_schedule(&trans);
-
- if (status == VEXPRESS_CONFIG_STATUS_WAIT)
- status = vexpress_config_wait(&trans);
-
- return status;
-}
-EXPORT_SYMBOL(vexpress_config_write);
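[Editor's note] For reference, users of the interface deleted here obtained a function handle and issued blocking transactions through it; per the diffstat the mechanism reappears under drivers/bus/vexpress-config.c. A sketch of the old calling convention, using only symbols from the removed file:

static int my_read_config(struct device *dev, int offset, u32 *value)
{
	struct vexpress_config_func *func;
	int err;

	/* Looks up the bridge via the "arm,vexpress,config-bridge" phandle */
	func = __vexpress_config_func_get(dev, NULL);
	if (!func)
		return -ENODEV;

	/* Schedules the transaction and sleeps until the bridge completes */
	err = vexpress_config_read(func, offset, value);

	vexpress_config_func_put(func);
	return err;
}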
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 35281e804e7e..9e21e4fc9599 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -11,23 +11,22 @@
* Copyright (C) 2012 ARM Limited
*/
+#include <linux/basic_mmio_gpio.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/io.h>
-#include <linux/leds.h>
+#include <linux/mfd/core.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/timer.h>
#include <linux/vexpress.h>
#define SYS_ID 0x000
#define SYS_SW 0x004
#define SYS_LED 0x008
#define SYS_100HZ 0x024
-#define SYS_FLAGS 0x030
#define SYS_FLAGSSET 0x030
#define SYS_FLAGSCLR 0x034
#define SYS_NVFLAGS 0x038
@@ -46,465 +45,209 @@
#define SYS_CFGSTAT 0x0a8
#define SYS_HBI_MASK 0xfff
-#define SYS_ID_HBI_SHIFT 16
#define SYS_PROCIDx_HBI_SHIFT 0
-#define SYS_LED_LED(n) (1 << (n))
-
#define SYS_MCI_CARDIN (1 << 0)
#define SYS_MCI_WPROT (1 << 1)
-#define SYS_FLASH_WPn (1 << 0)
-
#define SYS_MISC_MASTERSITE (1 << 14)
-#define SYS_CFGCTRL_START (1 << 31)
-#define SYS_CFGCTRL_WRITE (1 << 30)
-#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
-#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
-#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
-#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
-#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
-
-#define SYS_CFGSTAT_ERR (1 << 1)
-#define SYS_CFGSTAT_COMPLETE (1 << 0)
-
-
-static void __iomem *vexpress_sysreg_base;
-static struct device *vexpress_sysreg_dev;
-static int vexpress_master_site;
-
-void vexpress_flags_set(u32 data)
-{
- writel(~0, vexpress_sysreg_base + SYS_FLAGSCLR);
- writel(data, vexpress_sysreg_base + SYS_FLAGSSET);
-}
+static void __iomem *__vexpress_sysreg_base;
-u32 vexpress_get_procid(int site)
+static void __iomem *vexpress_sysreg_base(void)
{
- if (site == VEXPRESS_SITE_MASTER)
- site = vexpress_master_site;
-
- return readl(vexpress_sysreg_base + (site == VEXPRESS_SITE_DB1 ?
- SYS_PROCID0 : SYS_PROCID1));
-}
+ if (!__vexpress_sysreg_base) {
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-sysreg");
-u32 vexpress_get_hbi(int site)
-{
- u32 id;
-
- switch (site) {
- case VEXPRESS_SITE_MB:
- id = readl(vexpress_sysreg_base + SYS_ID);
- return (id >> SYS_ID_HBI_SHIFT) & SYS_HBI_MASK;
- case VEXPRESS_SITE_MASTER:
- case VEXPRESS_SITE_DB1:
- case VEXPRESS_SITE_DB2:
- id = vexpress_get_procid(site);
- return (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
+ __vexpress_sysreg_base = of_iomap(node, 0);
}
- return ~0;
-}
+ WARN_ON(!__vexpress_sysreg_base);
-void __iomem *vexpress_get_24mhz_clock_base(void)
-{
- return vexpress_sysreg_base + SYS_24MHZ;
+ return __vexpress_sysreg_base;
}
-static void vexpress_sysreg_find_prop(struct device_node *node,
- const char *name, u32 *val)
+static int vexpress_sysreg_get_master(void)
{
- of_node_get(node);
- while (node) {
- if (of_property_read_u32(node, name, val) == 0) {
- of_node_put(node);
- return;
- }
- node = of_get_next_parent(node);
- }
-}
-
-unsigned __vexpress_get_site(struct device *dev, struct device_node *node)
-{
- u32 site = 0;
-
- WARN_ON(dev && node && dev->of_node != node);
- if (dev && !node)
- node = dev->of_node;
-
- if (node) {
- vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site);
- } else if (dev && dev->bus == &platform_bus_type) {
- struct platform_device *pdev = to_platform_device(dev);
-
- if (pdev->num_resources == 1 &&
- pdev->resource[0].flags == IORESOURCE_BUS)
- site = pdev->resource[0].start;
- } else if (dev && strncmp(dev_name(dev), "ct:", 3) == 0) {
- site = VEXPRESS_SITE_MASTER;
- }
+ if (readl(vexpress_sysreg_base() + SYS_MISC) & SYS_MISC_MASTERSITE)
+ return VEXPRESS_SITE_DB2;
- if (site == VEXPRESS_SITE_MASTER)
- site = vexpress_master_site;
-
- return site;
+ return VEXPRESS_SITE_DB1;
}
-
-struct vexpress_sysreg_config_func {
- u32 template;
- u32 device;
-};
-
-static struct vexpress_config_bridge *vexpress_sysreg_config_bridge;
-static struct timer_list vexpress_sysreg_config_timer;
-static u32 *vexpress_sysreg_config_data;
-static int vexpress_sysreg_config_tries;
-
-static void *vexpress_sysreg_config_func_get(struct device *dev,
- struct device_node *node)
+void vexpress_flags_set(u32 data)
{
- struct vexpress_sysreg_config_func *config_func;
- u32 site = 0;
- u32 position = 0;
- u32 dcc = 0;
- u32 func_device[2];
- int err = -EFAULT;
-
- if (node) {
- of_node_get(node);
- vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site);
- vexpress_sysreg_find_prop(node, "arm,vexpress,position",
- &position);
- vexpress_sysreg_find_prop(node, "arm,vexpress,dcc", &dcc);
- err = of_property_read_u32_array(node,
- "arm,vexpress-sysreg,func", func_device,
- ARRAY_SIZE(func_device));
- of_node_put(node);
- } else if (dev && dev->bus == &platform_bus_type) {
- struct platform_device *pdev = to_platform_device(dev);
-
- if (pdev->num_resources == 1 &&
- pdev->resource[0].flags == IORESOURCE_BUS) {
- site = pdev->resource[0].start;
- func_device[0] = pdev->resource[0].end;
- func_device[1] = pdev->id;
- err = 0;
- }
- }
- if (err)
- return NULL;
-
- config_func = kzalloc(sizeof(*config_func), GFP_KERNEL);
- if (!config_func)
- return NULL;
-
- config_func->template = SYS_CFGCTRL_DCC(dcc);
- config_func->template |= SYS_CFGCTRL_FUNC(func_device[0]);
- config_func->template |= SYS_CFGCTRL_SITE(site == VEXPRESS_SITE_MASTER ?
- vexpress_master_site : site);
- config_func->template |= SYS_CFGCTRL_POSITION(position);
- config_func->device |= func_device[1];
-
- dev_dbg(vexpress_sysreg_dev, "func 0x%p = 0x%x, %d\n", config_func,
- config_func->template, config_func->device);
-
- return config_func;
+ writel(~0, vexpress_sysreg_base() + SYS_FLAGSCLR);
+ writel(data, vexpress_sysreg_base() + SYS_FLAGSSET);
}
-static void vexpress_sysreg_config_func_put(void *func)
+unsigned int vexpress_get_mci_cardin(struct device *dev)
{
- kfree(func);
+ return readl(vexpress_sysreg_base() + SYS_MCI) & SYS_MCI_CARDIN;
}
-static int vexpress_sysreg_config_func_exec(void *func, int offset,
- bool write, u32 *data)
+u32 vexpress_get_procid(int site)
{
- int status;
- struct vexpress_sysreg_config_func *config_func = func;
- u32 command;
-
- if (WARN_ON(!vexpress_sysreg_base))
- return -ENOENT;
-
- command = readl(vexpress_sysreg_base + SYS_CFGCTRL);
- if (WARN_ON(command & SYS_CFGCTRL_START))
- return -EBUSY;
-
- command = SYS_CFGCTRL_START;
- command |= write ? SYS_CFGCTRL_WRITE : 0;
- command |= config_func->template;
- command |= SYS_CFGCTRL_DEVICE(config_func->device + offset);
-
- /* Use a canary for reads */
- if (!write)
- *data = 0xdeadbeef;
-
- dev_dbg(vexpress_sysreg_dev, "command %x, data %x\n",
- command, *data);
- writel(*data, vexpress_sysreg_base + SYS_CFGDATA);
- writel(0, vexpress_sysreg_base + SYS_CFGSTAT);
- writel(command, vexpress_sysreg_base + SYS_CFGCTRL);
- mb();
-
- if (vexpress_sysreg_dev) {
- /* Schedule completion check */
- if (!write)
- vexpress_sysreg_config_data = data;
- vexpress_sysreg_config_tries = 100;
- mod_timer(&vexpress_sysreg_config_timer,
- jiffies + usecs_to_jiffies(100));
- status = VEXPRESS_CONFIG_STATUS_WAIT;
- } else {
- /* Early execution, no timer available, have to spin */
- u32 cfgstat;
-
- do {
- cpu_relax();
- cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT);
- } while (!cfgstat);
-
- if (!write && (cfgstat & SYS_CFGSTAT_COMPLETE))
- *data = readl(vexpress_sysreg_base + SYS_CFGDATA);
- status = VEXPRESS_CONFIG_STATUS_DONE;
-
- if (cfgstat & SYS_CFGSTAT_ERR)
- status = -EINVAL;
- }
+ if (site == VEXPRESS_SITE_MASTER)
+ site = vexpress_sysreg_get_master();
- return status;
+ return readl(vexpress_sysreg_base() + (site == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
}
-struct vexpress_config_bridge_info vexpress_sysreg_config_bridge_info = {
- .name = "vexpress-sysreg",
- .func_get = vexpress_sysreg_config_func_get,
- .func_put = vexpress_sysreg_config_func_put,
- .func_exec = vexpress_sysreg_config_func_exec,
-};
-
-static void vexpress_sysreg_config_complete(unsigned long data)
+void __iomem *vexpress_get_24mhz_clock_base(void)
{
- int status = VEXPRESS_CONFIG_STATUS_DONE;
- u32 cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT);
-
- if (cfgstat & SYS_CFGSTAT_ERR)
- status = -EINVAL;
- if (!vexpress_sysreg_config_tries--)
- status = -ETIMEDOUT;
-
- if (status < 0) {
- dev_err(vexpress_sysreg_dev, "error %d\n", status);
- } else if (!(cfgstat & SYS_CFGSTAT_COMPLETE)) {
- mod_timer(&vexpress_sysreg_config_timer,
- jiffies + usecs_to_jiffies(50));
- return;
- }
-
- if (vexpress_sysreg_config_data) {
- *vexpress_sysreg_config_data = readl(vexpress_sysreg_base +
- SYS_CFGDATA);
- dev_dbg(vexpress_sysreg_dev, "read data %x\n",
- *vexpress_sysreg_config_data);
- vexpress_sysreg_config_data = NULL;
- }
-
- vexpress_config_complete(vexpress_sysreg_config_bridge, status);
+ return vexpress_sysreg_base() + SYS_24MHZ;
}
-void vexpress_sysreg_setup(struct device_node *node)
-{
- if (WARN_ON(!vexpress_sysreg_base))
- return;
-
- if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
- vexpress_master_site = VEXPRESS_SITE_DB2;
- else
- vexpress_master_site = VEXPRESS_SITE_DB1;
-
- vexpress_sysreg_config_bridge = vexpress_config_bridge_register(
- node, &vexpress_sysreg_config_bridge_info);
- WARN_ON(!vexpress_sysreg_config_bridge);
-}
-
void __init vexpress_sysreg_early_init(void __iomem *base)
{
- vexpress_sysreg_base = base;
- vexpress_sysreg_setup(NULL);
-}
-
-void __init vexpress_sysreg_of_early_init(void)
-{
- struct device_node *node;
-
- if (vexpress_sysreg_base)
- return;
+ __vexpress_sysreg_base = base;
- node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg");
- if (node) {
- vexpress_sysreg_base = of_iomap(node, 0);
- vexpress_sysreg_setup(node);
- }
+ vexpress_config_set_master(vexpress_sysreg_get_master());
}
-#ifdef CONFIG_GPIOLIB
-
-#define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \
- [VEXPRESS_GPIO_##_name] = { \
- .reg = _reg, \
- .value = _reg##_##_value, \
- }
+/* The sysreg block is just a random collection of various functions... */
-static struct vexpress_sysreg_gpio {
- unsigned long reg;
- u32 value;
-} vexpress_sysreg_gpios[] = {
- VEXPRESS_SYSREG_GPIO(MMC_CARDIN, SYS_MCI, CARDIN),
- VEXPRESS_SYSREG_GPIO(MMC_WPROT, SYS_MCI, WPROT),
- VEXPRESS_SYSREG_GPIO(FLASH_WPn, SYS_FLASH, WPn),
- VEXPRESS_SYSREG_GPIO(LED0, SYS_LED, LED(0)),
- VEXPRESS_SYSREG_GPIO(LED1, SYS_LED, LED(1)),
- VEXPRESS_SYSREG_GPIO(LED2, SYS_LED, LED(2)),
- VEXPRESS_SYSREG_GPIO(LED3, SYS_LED, LED(3)),
- VEXPRESS_SYSREG_GPIO(LED4, SYS_LED, LED(4)),
- VEXPRESS_SYSREG_GPIO(LED5, SYS_LED, LED(5)),
- VEXPRESS_SYSREG_GPIO(LED6, SYS_LED, LED(6)),
- VEXPRESS_SYSREG_GPIO(LED7, SYS_LED, LED(7)),
+static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = {
+ .label = "sys_id",
};
-static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- return 0;
-}
-
-static int vexpress_sysreg_gpio_get(struct gpio_chip *chip,
- unsigned offset)
-{
- struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset];
- u32 reg_value = readl(vexpress_sysreg_base + gpio->reg);
-
- return !!(reg_value & gpio->value);
-}
-
-static void vexpress_sysreg_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset];
- u32 reg_value = readl(vexpress_sysreg_base + gpio->reg);
-
- if (value)
- reg_value |= gpio->value;
- else
- reg_value &= ~gpio->value;
-
- writel(reg_value, vexpress_sysreg_base + gpio->reg);
-}
-
-static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- vexpress_sysreg_gpio_set(chip, offset, value);
-
- return 0;
-}
-
-static struct gpio_chip vexpress_sysreg_gpio_chip = {
- .label = "vexpress-sysreg",
- .direction_input = vexpress_sysreg_gpio_direction_input,
- .direction_output = vexpress_sysreg_gpio_direction_output,
- .get = vexpress_sysreg_gpio_get,
- .set = vexpress_sysreg_gpio_set,
- .ngpio = ARRAY_SIZE(vexpress_sysreg_gpios),
- .base = 0,
+static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = {
+ .label = "sys_led",
+ .base = -1,
+ .ngpio = 8,
};
-
-#define VEXPRESS_SYSREG_GREEN_LED(_name, _default_trigger, _gpio) \
- { \
- .name = "v2m:green:"_name, \
- .default_trigger = _default_trigger, \
- .gpio = VEXPRESS_GPIO_##_gpio, \
- }
-
-struct gpio_led vexpress_sysreg_leds[] = {
- VEXPRESS_SYSREG_GREEN_LED("user1", "heartbeat", LED0),
- VEXPRESS_SYSREG_GREEN_LED("user2", "mmc0", LED1),
- VEXPRESS_SYSREG_GREEN_LED("user3", "cpu0", LED2),
- VEXPRESS_SYSREG_GREEN_LED("user4", "cpu1", LED3),
- VEXPRESS_SYSREG_GREEN_LED("user5", "cpu2", LED4),
- VEXPRESS_SYSREG_GREEN_LED("user6", "cpu3", LED5),
- VEXPRESS_SYSREG_GREEN_LED("user7", "cpu4", LED6),
- VEXPRESS_SYSREG_GREEN_LED("user8", "cpu5", LED7),
+static struct bgpio_pdata vexpress_sysreg_sys_mci_pdata = {
+ .label = "sys_mci",
+ .base = -1,
+ .ngpio = 2,
};
-struct gpio_led_platform_data vexpress_sysreg_leds_pdata = {
- .num_leds = ARRAY_SIZE(vexpress_sysreg_leds),
- .leds = vexpress_sysreg_leds,
+static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = {
+ .label = "sys_flash",
+ .base = -1,
+ .ngpio = 1,
};
-#endif
-
+static struct syscon_platform_data vexpress_sysreg_sys_misc_pdata = {
+ .label = "sys_misc",
+};
-static ssize_t vexpress_sysreg_sys_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "0x%08x\n", readl(vexpress_sysreg_base + SYS_ID));
-}
+static struct syscon_platform_data vexpress_sysreg_sys_procid_pdata = {
+ .label = "sys_procid",
+};
-DEVICE_ATTR(sys_id, S_IRUGO, vexpress_sysreg_sys_id_show, NULL);
+static struct mfd_cell vexpress_sysreg_cells[] = {
+ {
+ .name = "syscon",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM(SYS_ID, 0x4),
+ },
+ .platform_data = &vexpress_sysreg_sys_id_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_id_pdata),
+ }, {
+ .name = "basic-mmio-gpio",
+ .of_compatible = "arm,vexpress-sysreg,sys_led",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"),
+ },
+ .platform_data = &vexpress_sysreg_sys_led_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata),
+ }, {
+ .name = "basic-mmio-gpio",
+ .of_compatible = "arm,vexpress-sysreg,sys_mci",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"),
+ },
+ .platform_data = &vexpress_sysreg_sys_mci_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata),
+ }, {
+ .name = "basic-mmio-gpio",
+ .of_compatible = "arm,vexpress-sysreg,sys_flash",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"),
+ },
+ .platform_data = &vexpress_sysreg_sys_flash_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata),
+ }, {
+ .name = "syscon",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM(SYS_MISC, 0x4),
+ },
+ .platform_data = &vexpress_sysreg_sys_misc_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_misc_pdata),
+ }, {
+ .name = "syscon",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM(SYS_PROCID0, 0x8),
+ },
+ .platform_data = &vexpress_sysreg_sys_procid_pdata,
+ .pdata_size = sizeof(vexpress_sysreg_sys_procid_pdata),
+ }, {
+ .name = "vexpress-syscfg",
+ .num_resources = 1,
+ .resources = (struct resource []) {
+ DEFINE_RES_MEM(SYS_CFGDATA, 0xc),
+ },
+ }
+};
static int vexpress_sysreg_probe(struct platform_device *pdev)
{
- int err;
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 0);
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to request memory region!\n");
- return -EBUSY;
- }
+ struct resource *mem;
+ void __iomem *base;
+ struct bgpio_chip *mmc_gpio_chip;
+ u32 dt_hbi;
- if (!vexpress_sysreg_base) {
- vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- vexpress_sysreg_setup(pdev->dev.of_node);
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
- if (!vexpress_sysreg_base) {
- dev_err(&pdev->dev, "Failed to obtain base address!\n");
- return -EFAULT;
- }
+ base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!base)
+ return -ENOMEM;
- setup_timer(&vexpress_sysreg_config_timer,
- vexpress_sysreg_config_complete, 0);
+ vexpress_config_set_master(vexpress_sysreg_get_master());
- vexpress_sysreg_dev = &pdev->dev;
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER);
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-#ifdef CONFIG_GPIOLIB
- vexpress_sysreg_gpio_chip.dev = &pdev->dev;
- err = gpiochip_add(&vexpress_sysreg_gpio_chip);
- if (err) {
- vexpress_config_bridge_unregister(
- vexpress_sysreg_config_bridge);
- dev_err(&pdev->dev, "Failed to register GPIO chip! (%d)\n",
- err);
- return err;
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) does not match hardware (%x)!\n",
+ dt_hbi, hbi);
}
- platform_device_register_data(vexpress_sysreg_dev, "leds-gpio",
- PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata,
- sizeof(vexpress_sysreg_leds_pdata));
-#endif
-
- device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
-
- return 0;
+ /*
+ * Duplicated SYS_MCI pseudo-GPIO controller for compatibility with
+ * older device trees using the sysreg node for MMC control lines.
+ */
+ mmc_gpio_chip = devm_kzalloc(&pdev->dev, sizeof(*mmc_gpio_chip),
+ GFP_KERNEL);
+ if (!mmc_gpio_chip)
+ return -ENOMEM;
+ bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
+ NULL, NULL, NULL, NULL, 0);
+ mmc_gpio_chip->gc.ngpio = 2;
+ gpiochip_add(&mmc_gpio_chip->gc);
+
+ return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ vexpress_sysreg_cells,
+ ARRAY_SIZE(vexpress_sysreg_cells), mem, 0, NULL);
}
static const struct of_device_id vexpress_sysreg_match[] = {
@@ -522,7 +265,12 @@ static struct platform_driver vexpress_sysreg_driver = {
static int __init vexpress_sysreg_init(void)
{
- vexpress_sysreg_of_early_init();
+ struct device_node *node;
+
+ /* Need the sysreg early, before any other device... */
+ for_each_matching_node(node, vexpress_sysreg_match)
+ of_platform_device_create(node, NULL, NULL);
+
return platform_driver_register(&vexpress_sysreg_driver);
}
core_initcall(vexpress_sysreg_init);
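[Editor's note] Two details of the rewritten probe are easy to miss: mfd_add_devices() receives the parent's mem resource as the base, so each cell's DEFINE_RES_MEM() offset is translated into an absolute address; and the LED/MCI/flash registers are handed to the generic basic-mmio-gpio driver rather than a hand-rolled gpio_chip. A reduced sketch of the resource translation, with a hypothetical 0x008 offset:

static struct mfd_cell my_cells[] = {
	{
		.name = "syscon",
		.num_resources = 1,
		.resources = (struct resource []) {
			DEFINE_RES_MEM(0x008, 0x4),	/* relative to parent */
		},
	},
};

static int my_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!mem)
		return -EINVAL;

	/* Passing mem as mem_base rebases the 0x008 offset onto mem->start */
	return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, my_cells,
			       ARRAY_SIZE(my_cells), mem, 0, NULL);
}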
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8baff0effc7d..a43d0c467274 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on HAVE_CLK
+ depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
help
This option enables device driver support for the PWM channels
on certain Atmel processors. Pulse Width Modulation is used for
@@ -200,7 +201,7 @@ config ICS932S401
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST)
---help---
This option enables device driver support for Atmel Synchronized
Serial Communication peripheral (SSC).
@@ -468,7 +469,7 @@ config BMP085_SPI
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
@@ -515,6 +516,15 @@ config SRAM
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config VEXPRESS_SYSCFG
+ bool "Versatile Express System Configuration driver"
+ depends on VEXPRESS_CONFIG
+ default y
+ help
+ ARM Ltd. Versatile Express uses a specialised platform configuration
+ bus. The System Configuration interface is one of the possible means
+ of generating transactions on this bus.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7eb4b69580c0..d59ce1261b38 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
+obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index b7ebf8021d99..c72e96b523ed 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -366,11 +367,17 @@ static const struct dev_pm_ops charlcd_pm_ops = {
.resume = charlcd_resume,
};
+static const struct of_device_id charlcd_match[] = {
+ { .compatible = "arm,versatile-lcd", },
+ {}
+};
+
static struct platform_driver charlcd_driver = {
.driver = {
.name = DRIVERNAME,
.owner = THIS_MODULE,
.pm = &charlcd_pm_ops,
+ .of_match_table = of_match_ptr(charlcd_match),
},
.remove = __exit_p(charlcd_remove),
};
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 6a672f9ef522..b909fb30232a 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -85,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
- char *endp;
u64 val;
__le32 val_le;
int rc;
@@ -93,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
/* Decode input */
- val = simple_strtoull(buf, &endp, 0);
- if (buf == endp) {
+ rc = kstrtoull(buf, 0, &val);
+ if (rc < 0) {
dev_dbg(dev, "input string not a number\n");
return -EINVAL;
}
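[Editor's note] kstrtoull() is stricter than the replaced simple_strtoull(): it rejects trailing garbage and reports overflow as -ERANGE instead of silently wrapping. The resulting sysfs-store idiom, sketched with a hypothetical attribute:

static ssize_t my_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	u64 val;
	int rc;

	rc = kstrtoull(buf, 0, &val);	/* base 0: "0x..", "0..", decimal */
	if (rc < 0)
		return rc;		/* -EINVAL or -ERANGE */

	/* ... store val to the device ... */
	return count;
}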
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 50d2096ea1c7..0a33ade64109 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -348,7 +348,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
char name[64];
unsigned int i;
- sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);
+ sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx);
root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
if (!root) {
@@ -454,7 +454,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
}
for (i = 0; i < GENWQE_MAX_VFS; i++) {
- sprintf(name, "vf%d_jobtimeout_msec", i);
+ sprintf(name, "vf%u_jobtimeout_msec", i);
file = debugfs_create_u32(name, 0666, root,
&cd->vf_jobtimeout_msec[i]);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index d049d271699c..62cc6bb3f62e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -454,7 +454,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
*/
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
- int rc;
+ int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
@@ -718,7 +718,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
- rc = pci_enable_msi_block(pci_dev, count);
+ rc = pci_enable_msi_exact(pci_dev, count);
if (rc == 0)
cd->flags |= GENWQE_FLAG_MSI_ENABLED;
return rc;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index b8deb3455480..0d6234db00fa 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -111,8 +111,6 @@ int mei_amthif_host_init(struct mei_device *dev)
return ret;
}
- cl->state = MEI_FILE_CONNECTING;
-
ret = mei_cl_connect(cl, NULL);
dev->iamthif_state = MEI_IAMTHIF_IDLE;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index ddc5ac92a200..0e993ef28b94 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -247,7 +247,7 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
return id;
if (length > dev->me_clients[id].props.max_msg_length)
- return -EINVAL;
+ return -EFBIG;
cb = mei_io_cb_init(cl, NULL);
if (!cb)
@@ -427,8 +427,6 @@ int mei_cl_enable_device(struct mei_cl_device *device)
mutex_lock(&dev->device_lock);
- cl->state = MEI_FILE_CONNECTING;
-
err = mei_cl_connect(cl, NULL);
if (err < 0) {
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 8c078b808cd3..59d20c599b16 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -415,6 +416,10 @@ void mei_host_client_init(struct work_struct *work)
dev->reset_count = 0;
mutex_unlock(&dev->device_lock);
+
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
+ pm_runtime_autosuspend(&dev->pdev->dev);
}
/**
@@ -425,6 +430,12 @@ void mei_host_client_init(struct work_struct *work)
*/
bool mei_hbuf_acquire(struct mei_device *dev)
{
+ if (mei_pg_state(dev) == MEI_PG_ON ||
+ dev->pg_event == MEI_PG_EVENT_WAIT) {
+ dev_dbg(&dev->pdev->dev, "device is in pg\n");
+ return false;
+ }
+
if (!dev->hbuf_is_ready) {
dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
return false;
@@ -460,9 +471,18 @@ int mei_cl_disconnect(struct mei_cl *cl)
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
+ if (!cb) {
+ rets = -ENOMEM;
+ goto free;
+ }
cb->fop_type = MEI_FOP_CLOSE;
if (mei_hbuf_acquire(dev)) {
@@ -494,8 +514,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
cl_err(dev, cl, "wrong status client disconnect.\n");
if (err)
- cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
- err);
+ cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);
cl_err(dev, cl, "failed to disconnect from FW client.\n");
}
@@ -503,6 +522,10 @@ int mei_cl_disconnect(struct mei_cl *cl)
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
mei_io_cb_free(cb);
return rets;
}
@@ -557,6 +580,13 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
dev = cl->dev;
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, file);
if (!cb) {
rets = -ENOMEM;
@@ -567,6 +597,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
+ cl->state = MEI_FILE_CONNECTING;
if (mei_hbm_cl_connect_req(dev, cl)) {
rets = -ENODEV;
goto out;
@@ -596,6 +627,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
rets = cl->status;
out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
mei_io_cb_free(cb);
return rets;
}
@@ -713,23 +748,31 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
return -ENOTTY;
}
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
/* always allocate at least client max message */
length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
rets = mei_io_cb_alloc_resp_buf(cb, length);
if (rets)
- goto err;
+ goto out;
cb->fop_type = MEI_FOP_READ;
if (mei_hbuf_acquire(dev)) {
- if (mei_hbm_cl_flow_control_req(dev, cl)) {
- cl_err(dev, cl, "flow control send failed\n");
- rets = -ENODEV;
- goto err;
- }
+ rets = mei_hbm_cl_flow_control_req(dev, cl);
+ if (rets < 0)
+ goto out;
+
list_add_tail(&cb->list, &dev->read_list.list);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
@@ -737,9 +780,14 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
cl->read_cb = cb;
- return rets;
-err:
- mei_io_cb_free(cb);
+out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
+ if (rets)
+ mei_io_cb_free(cb);
+
return rets;
}
@@ -776,7 +824,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return rets;
if (rets == 0) {
- cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
return 0;
}
@@ -856,6 +904,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
cb->fop_type = MEI_FOP_WRITE;
cb->buf_idx = 0;
@@ -926,6 +980,10 @@ out:
rets = buf->size;
err:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
return rets;
}
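[Editor's note] Every entry point touched in client.c now brackets its work with the same runtime-PM pattern: pm_runtime_get() (tolerating -EINPROGRESS for an in-flight resume), then mark_last_busy plus put_autosuspend on all exit paths. Extracted as a standalone sketch:

#include <linux/pm_runtime.h>

static int my_op(struct device *device)
{
	int rets;

	rets = pm_runtime_get(device);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(device);	/* balance the failed get */
		return rets;
	}

	/* ... issue the I/O, set rets ... */
	rets = 0;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
	return rets;
}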
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 4960288e543a..804106209d76 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -14,10 +14,12 @@
*
*/
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mei.h>
+#include <linux/pm_runtime.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -58,6 +60,34 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
}
/**
+ * mei_hbm_idle - set hbm to idle state
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_idle(struct mei_device *dev)
+{
+ dev->init_clients_timer = 0;
+ dev->hbm_state = MEI_HBM_IDLE;
+}
+
+/**
+ * mei_hbm_reset - reset hbm counters and bookkeeping data structures
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_reset(struct mei_device *dev)
+{
+ dev->me_clients_num = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+
+ kfree(dev->me_clients);
+ dev->me_clients = NULL;
+
+ mei_hbm_idle(dev);
+}
+
+/**
* mei_hbm_me_cl_allocate - allocates storage for me clients
*
* @dev: the device structure
@@ -69,9 +99,7 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev)
struct mei_me_client *clients;
int b;
- dev->me_clients_num = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
+ mei_hbm_reset(dev);
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
@@ -80,9 +108,6 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev)
if (dev->me_clients_num == 0)
return 0;
- kfree(dev->me_clients);
- dev->me_clients = NULL;
-
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n",
dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
@@ -133,17 +158,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
}
-/**
- * mei_hbm_idle - set hbm to idle state
- *
- * @dev: the device structure
- */
-void mei_hbm_idle(struct mei_device *dev)
-{
- dev->init_clients_timer = 0;
- dev->hbm_state = MEI_HBM_IDLE;
-}
-
int mei_hbm_start_wait(struct mei_device *dev)
{
int ret;
@@ -289,6 +303,34 @@ static int mei_hbm_prop_req(struct mei_device *dev)
return 0;
}
+/**
+ * mei_hbm_pg - sends pg command
+ *
+ * @dev: the device structure
+ * @pg_cmd: the pg command code
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_power_gate *req;
+ const size_t len = sizeof(struct hbm_power_gate);
+ int ret;
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ req = (struct hbm_power_gate *)dev->wr_msg.data;
+ memset(req, 0, len);
+ req->hbm_cmd = pg_cmd;
+
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret)
+ dev_err(&dev->pdev->dev, "power gate command write failed.\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg);
+
/**
* mei_hbm_stop_req - send stop request message
*
@@ -701,6 +743,27 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_hbm_cl_flow_control_res(dev, flow_control);
break;
+ case MEI_PG_ISOLATION_ENTRY_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n");
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+ break;
+
+ case MEI_PG_ISOLATION_EXIT_REQ_CMD:
+ dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n");
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+ else
+ /*
+ * If the driver is not waiting on this then
+ * this is HW initiated exit from PG.
+ * Start runtime pm resume sequence to exit from PG.
+ */
+ pm_request_resume(&dev->pdev->dev);
+ break;
+
case HOST_CLIENT_PROPERTIES_RES_CMD:
dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n");
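[Editor's note] The handshake these handlers implement is driven by dev->pg_event: the requester sets MEI_PG_EVENT_WAIT before sending a PG command, the dispatch code above flips it to MEI_PG_EVENT_RECEIVED and wakes wait_pg, and the requester returns it to MEI_PG_EVENT_IDLE when finished. Condensed from mei_me_pg_set_sync() in hw-me.c further down, the requester side looks like this:

	/* under dev->device_lock */
	dev->pg_event = MEI_PG_EVENT_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	ret = (dev->pg_event == MEI_PG_EVENT_RECEIVED) ? 0 : -ETIME;
	dev->pg_event = MEI_PG_EVENT_IDLE;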
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 20e8782711c0..683eb2835cec 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -50,6 +50,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
}
void mei_hbm_idle(struct mei_device *dev);
+void mei_hbm_reset(struct mei_device *dev);
int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
@@ -57,6 +58,7 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
bool mei_hbm_version_is_supported(struct mei_device *dev);
+int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
#endif /* _MEI_HBM_H_ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index cabc04383685..a7856c0ac576 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -133,6 +133,8 @@
#define ME_CB_RW 8
/* ME_CSR_HA - ME Control Status Host Access register (read only) */
#define ME_CSR_HA 0xC
+/* H_HGC_CSR - PGI register */
+#define H_HPG_CSR 0x10
/* register bits of H_CSR (Host Control Status register) */
@@ -162,6 +164,8 @@ access to ME_CBD */
#define ME_CBWP_HRA 0x00FF0000
/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
#define ME_CBRP_HRA 0x0000FF00
+/* ME Power Gate Isolation Capability HRA - host ready only access */
+#define ME_PGIC_HRA 0x00000040
/* ME Reset HRA - host read only access to ME_RST */
#define ME_RST_HRA 0x00000010
/* ME Ready HRA - host read only access to ME_RDY */
@@ -173,4 +177,9 @@ access to ME_CBD */
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
+
+/* register bits - H_HPG_CSR */
+#define H_HPG_CSR_PGIHEXR 0x00000001
+#define H_HPG_CSR_PGI 0x00000002
+
#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 8dbdaaef1af5..6a2d272cea43 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -109,10 +109,27 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
*/
static void mei_me_hw_config(struct mei_device *dev)
{
+ struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(to_me_hw(dev));
/* Doesn't change in runtime */
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+
+ hw->pg_state = MEI_PG_OFF;
+}
+
+/**
+ * mei_me_pg_state - translate internal pg state
+ * to the mei power gating state
+ *
+ * @dev: the device structure
+ * returns: the cached power gating state, MEI_PG_ON or MEI_PG_OFF
+ */
+static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ return hw->pg_state;
}
+
/**
* mei_clear_interrupts - clear and stop interrupts
*
@@ -164,6 +181,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(hw, hcsr);
+
+ /* complete this write before we set host ready on another CPU */
+ mmiowb();
}
/**
* mei_me_hw_reset - resets fw via mei csr register.
@@ -183,8 +203,21 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
else
hcsr &= ~H_IE;
+ dev->recvd_hw_ready = false;
mei_me_reg_write(hw, H_CSR, hcsr);
+ /*
+ * Host reads the H_CSR once to ensure that the
+ * posted write to H_CSR completes.
+ */
+ hcsr = mei_hcsr_read(hw);
+
+ if ((hcsr & H_RST) == 0)
+ dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
+
+ if ((hcsr & H_RDY) == H_RDY)
+ dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+
if (intr_enable == false)
mei_me_hw_reset_release(dev);
@@ -201,6 +234,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
static void mei_me_host_set_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state = mei_hcsr_read(hw);
hw->host_hw_state |= H_IE | H_IG | H_RDY;
mei_hcsr_set(hw, hw->host_hw_state);
}
@@ -233,10 +267,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
int err;
- if (mei_me_hw_is_ready(dev))
- return 0;
- dev->recvd_hw_ready = false;
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
@@ -431,6 +462,144 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
}
/**
+ * mei_me_pg_enter - write pg enter register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_enter(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+ reg |= H_HPG_CSR_PGI;
+ mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_exit - write pg exit register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_exit(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+
+ WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
+
+ reg |= H_HPG_CSR_PGIHEXR;
+ mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_set_sync - perform pg entry procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_set_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ int ret;
+
+ dev->pg_event = MEI_PG_EVENT_WAIT;
+
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
+ mei_me_pg_enter(dev);
+ ret = 0;
+ } else {
+ ret = -ETIME;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ hw->pg_state = MEI_PG_ON;
+
+ return ret;
+}
+
+/**
+ * mei_me_pg_unset_sync - perform pg exit procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_unset_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ int ret;
+
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+ goto reply;
+
+ dev->pg_event = MEI_PG_EVENT_WAIT;
+
+ mei_me_pg_exit(dev);
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+reply:
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ else
+ ret = -ETIME;
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ hw->pg_state = MEI_PG_OFF;
+
+ return ret;
+}
+
+/**
+ * mei_me_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_me_pg_is_enabled(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
+
+ if ((reg & ME_PGIC_HRA) == 0)
+ goto notsupported;
+
+ if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
+ goto notsupported;
+
+ if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
+ dev->version.minor_version < HBM_MINOR_VERSION_PGI)
+ goto notsupported;
+
+ return true;
+
+notsupported:
+ dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
+ !!(reg & ME_PGIC_HRA),
+ dev->version.major_version,
+ dev->version.minor_version,
+ HBM_MAJOR_VERSION_PGI,
+ HBM_MINOR_VERSION_PGI);
+
+ return false;
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -491,14 +660,13 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
+ mei_me_hw_reset_release(dev);
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
wake_up_interruptible(&dev->wait_hw_ready);
} else {
-
- dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
- mei_me_hw_reset_release(dev);
+ dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
}
goto end;
}
@@ -524,9 +692,15 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
- rets = mei_irq_write_handler(dev, &complete_list);
-
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ /*
+ * During the PG handshake the only allowed write is the reply to
+ * the PG exit message, so block calling the write function
+ * if the pg event is not idle
+ */
+ if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+ rets = mei_irq_write_handler(dev, &complete_list);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ }
mei_irq_compl_handler(dev, &complete_list);
@@ -535,8 +709,65 @@ end:
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
+
+/**
+ * mei_me_fw_status - retrieve fw status from the pci config space
+ *
+ * @dev: the device structure
+ * @fw_status: fw status registers storage
+ *
+ * returns 0 on success, an error code otherwise
+ */
+static int mei_me_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
+ int i;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ switch (dev->pdev->device) {
+ case MEI_DEV_ID_IBXPK_1:
+ case MEI_DEV_ID_IBXPK_2:
+ case MEI_DEV_ID_CPT_1:
+ case MEI_DEV_ID_PBG_1:
+ case MEI_DEV_ID_PPT_1:
+ case MEI_DEV_ID_PPT_2:
+ case MEI_DEV_ID_PPT_3:
+ case MEI_DEV_ID_LPT_H:
+ case MEI_DEV_ID_LPT_W:
+ case MEI_DEV_ID_LPT_LP:
+ case MEI_DEV_ID_LPT_HR:
+ case MEI_DEV_ID_WPT_LP:
+ fw_status->count = 2;
+ break;
+ case MEI_DEV_ID_ICH10_1:
+ case MEI_DEV_ID_ICH10_2:
+ case MEI_DEV_ID_ICH10_3:
+ case MEI_DEV_ID_ICH10_4:
+ fw_status->count = 1;
+ break;
+ default:
+ fw_status->count = 0;
+ break;
+ }
+
+ for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ pci_cfg_reg[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static const struct mei_hw_ops mei_me_hw_ops = {
+ .pg_state = mei_me_pg_state,
+
+ .fw_status = mei_me_fw_status,
.host_is_ready = mei_me_host_is_ready,
.hw_is_ready = mei_me_hw_is_ready,
@@ -544,6 +775,8 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
+ .pg_is_enabled = mei_me_pg_is_enabled,
+
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
@@ -559,14 +792,81 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.read = mei_me_read_slots
};
+static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+{
+ u32 reg;
+ pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
+ return (reg & 0x600) == 0x200;
+}
+
+#define MEI_CFG_FW_NM \
+ .quirk_probe = mei_me_fw_type_nm
+
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+ u32 reg;
+ /* Read ME FW Status check for SPS Firmware */
+ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ /* if bits [19:16] = 15, running SPS Firmware */
+ return (reg & 0xf0000) == 0xf0000;
+}
+
+#define MEI_CFG_FW_SPS \
+ .quirk_probe = mei_me_fw_type_sps
+
+
+#define MEI_CFG_LEGACY_HFS \
+ .fw_status.count = 0
+
+#define MEI_CFG_ICH_HFS \
+ .fw_status.count = 1, \
+ .fw_status.status[0] = PCI_CFG_HFS_1
+
+#define MEI_CFG_PCH_HFS \
+ .fw_status.count = 2, \
+ .fw_status.status[0] = PCI_CFG_HFS_1, \
+ .fw_status.status[1] = PCI_CFG_HFS_2
+
+
+/* ICH Legacy devices */
+const struct mei_cfg mei_me_legacy_cfg = {
+ MEI_CFG_LEGACY_HFS,
+};
+
+/* ICH devices */
+const struct mei_cfg mei_me_ich_cfg = {
+ MEI_CFG_ICH_HFS,
+};
+
+/* PCH devices */
+const struct mei_cfg mei_me_pch_cfg = {
+ MEI_CFG_PCH_HFS,
+};
+
+
+/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
+const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_NM,
+};
+
+/* PCH Lynx Point with quirk for SPS Firmware exclusion */
+const struct mei_cfg mei_me_lpt_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_SPS,
+};
+
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
* @pdev: The pci device structure
+ * @cfg: per device generation config
*
* returns The mei_device_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg)
{
struct mei_device *dev;
@@ -575,7 +875,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev);
+ mei_device_init(dev, cfg);
dev->ops = &mei_me_hw_ops;
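[Editor's note] The MEI_CFG_* macros compose struct mei_cfg through designated initializers, so each platform entry stacks a firmware-status register layout with an optional probe quirk. A sketch of how a PCI probe could consume these configs; the id-table plumbing is assumed here, not part of this hunk, and quirk_probe() is taken to return true when the firmware variant must be excluded (both detectors above detect the excluded type):

static int my_mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (const struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;

	/* e.g. mei_me_fw_type_sps() on a Lynx Point running SPS firmware */
	if (cfg->quirk_probe && cfg->quirk_probe(pdev))
		return -ENODEV;

	dev = mei_me_dev_init(pdev, cfg);
	if (!dev)
		return -ENOMEM;
	/* ... */
	return 0;
}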
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 893d5119fa9b..12b0f4bbe1f1 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -24,6 +24,8 @@
#include "mei_dev.h"
#include "client.h"
+#define MEI_ME_RPM_TIMEOUT 500 /* ms */
+
struct mei_me_hw {
void __iomem *mem_addr;
/*
@@ -31,11 +33,22 @@ struct mei_me_hw {
*/
u32 host_hw_state;
u32 me_hw_state;
+ enum mei_pg_state pg_state;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+extern const struct mei_cfg mei_me_legacy_cfg;
+extern const struct mei_cfg mei_me_ich_cfg;
+extern const struct mei_cfg mei_me_pch_cfg;
+extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg;
+extern const struct mei_cfg mei_me_lpt_cfg;
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg);
+
+int mei_me_pg_set_sync(struct mei_device *dev);
+int mei_me_pg_unset_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
index 7283c24c1af1..f19229c4e655 100644
--- a/drivers/misc/mei/hw-txe-regs.h
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -89,7 +89,7 @@ enum {
# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000
# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000
# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000
-
+#define PCI_CFG_TXE_FW_STS1 0x48
#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index f60182a52f96..93273783dec5 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -158,7 +158,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
- hw->recvd_aliveness = false;
+ dev->pg_event = MEI_PG_EVENT_WAIT;
mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
}
return do_req;
@@ -213,6 +213,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
+ dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(&dev->pdev->dev,
"aliveness settled after %d msecs\n", t);
return t;
@@ -223,6 +224,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
t += MSEC_PER_SEC / 5;
} while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+ dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(&dev->pdev->dev, "aliveness timed out\n");
return -ETIME;
}
@@ -249,19 +251,22 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
return 0;
mutex_unlock(&dev->device_lock);
- err = wait_event_timeout(hw->wait_aliveness,
- hw->recvd_aliveness, timeout);
+ err = wait_event_timeout(hw->wait_aliveness_resp,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
mutex_lock(&dev->device_lock);
hw->aliveness = mei_txe_aliveness_get(dev);
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
- dev_err(&dev->pdev->dev, "aliveness timed out");
+ dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+ err, hw->aliveness, dev->pg_event);
else
- dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n",
- jiffies_to_msecs(timeout - err));
- hw->recvd_aliveness = false;
+ dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+ jiffies_to_msecs(timeout - err),
+ hw->aliveness, dev->pg_event);
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
return ret;
}
@@ -280,6 +285,32 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
}
/**
+ * mei_txe_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_txe_pg_is_enabled(struct mei_device *dev)
+{
+ return true;
+}
+
+/**
+ * mei_txe_pg_state - translate aliveness register value
+ * to the mei power gating state
+ *
+ * @dev: the device structure
+ *
+ * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
+ */
+static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
+}
+
+/**
* mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
*
* @dev: the device structure
@@ -589,7 +620,10 @@ static int mei_txe_write(struct mei_device *dev,
mei_txe_input_ready_interrupt_enable(dev);
if (!mei_txe_is_input_ready(dev)) {
- dev_err(&dev->pdev->dev, "Input is not ready");
+ struct mei_fw_status fw_status;
+ mei_fw_status(dev, &fw_status);
+ dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n",
+ FW_STS_PRM(fw_status));
return -EAGAIN;
}
@@ -960,9 +994,9 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
/* Clear the interrupt cause */
dev_dbg(&dev->pdev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
- hw->recvd_aliveness = true;
- if (waitqueue_active(&hw->wait_aliveness))
- wake_up(&hw->wait_aliveness);
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&hw->wait_aliveness_resp))
+ wake_up(&hw->wait_aliveness_resp);
}
@@ -1008,15 +1042,51 @@ end:
return IRQ_HANDLED;
}
+
+/**
+ * mei_txe_fw_status - retrieve fw status from the pci config space
+ *
+ * @dev: the device structure
+ * @fw_status: fw status registers storage
+ *
+ * returns: 0 on success, an error code otherwise
+ */
+static int mei_txe_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1};
+ int i;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = 2;
+
+ for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ pci_cfg_reg[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct mei_hw_ops mei_txe_hw_ops = {
+ .fw_status = mei_txe_fw_status,
.host_is_ready = mei_txe_host_is_ready,
+ .pg_state = mei_txe_pg_state,
+
.hw_is_ready = mei_txe_hw_is_ready,
.hw_reset = mei_txe_hw_reset,
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
+ .pg_is_enabled = mei_txe_pg_is_enabled,
+
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
@@ -1034,14 +1104,27 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
};
+#define MEI_CFG_TXE_FW_STS \
+ .fw_status.count = 2, \
+ .fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \
+ .fw_status.status[1] = PCI_CFG_TXE_FW_STS1
+
+const struct mei_cfg mei_txe_cfg = {
+ MEI_CFG_TXE_FW_STS,
+};
+
+
/**
* mei_txe_dev_init - allocates and initializes txe hardware specific structure
*
* @pdev - pci device
+ * @cfg - per device generation config
+ *
* returns struct mei_device * on success or NULL;
*
*/
-struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
@@ -1051,11 +1134,11 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev);
+ mei_device_init(dev, cfg);
hw = to_txe_hw(dev);
- init_waitqueue_head(&hw->wait_aliveness);
+ init_waitqueue_head(&hw->wait_aliveness_resp);
dev->ops = &mei_txe_hw_ops;
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index 0812d98633a4..e244af79167f 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -22,6 +22,8 @@
#include "hw.h"
#include "hw-txe-regs.h"
+#define MEI_TXI_RPM_TIMEOUT 500 /* ms */
+
/* Flatten Hierarchy interrupt cause */
#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */
#define TXE_INTR_READINESS HISR_INT_0_STS
@@ -35,12 +37,11 @@
/**
* struct mei_txe_hw - txe hardware specifics
*
- * @mem_addr: SeC and BRIDGE bars
- * @aliveness: aliveness (power gating) state of the hardware
- * @readiness: readiness state of the hardware
- * @wait_aliveness: aliveness wait queue
- * @recvd_aliveness: aliveness interrupt was recived
- * @intr_cause: translated interrupt cause
+ * @mem_addr: SeC and BRIDGE bars
+ * @aliveness: aliveness (power gating) state of the hardware
+ * @readiness: readiness state of the hardware
+ * @wait_aliveness_resp: aliveness wait queue
+ * @intr_cause: translated interrupt cause
*/
struct mei_txe_hw {
void __iomem *mem_addr[NUM_OF_MEM_BARS];
@@ -48,8 +49,7 @@ struct mei_txe_hw {
u32 readiness;
u32 slots;
- wait_queue_head_t wait_aliveness;
- bool recvd_aliveness;
+ wait_queue_head_t wait_aliveness_resp;
unsigned long intr_cause;
};
@@ -61,7 +61,10 @@ static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
return container_of((void *)hw, struct mei_device, hw);
}
-struct mei_device *mei_txe_dev_init(struct pci_dev *pdev);
+extern const struct mei_cfg mei_txe_cfg;
+
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg);
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 6b476ab49b2e..dd448e58cc87 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,14 +31,21 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
+#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* MEI Version
*/
-#define HBM_MINOR_VERSION 0
+#define HBM_MINOR_VERSION 1
#define HBM_MAJOR_VERSION 1
+/*
+ * MEI version with PGI support
+ */
+#define HBM_MINOR_VERSION_PGI 1
+#define HBM_MAJOR_VERSION_PGI 1
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -69,6 +76,11 @@
#define MEI_FLOW_CONTROL_CMD 0x08
+#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a
+#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a
+#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b
+#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b
+
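The new PG isolation opcodes follow the existing HBM convention that a response opcode is the request opcode with the top bit set, so masking with MEI_HBM_CMD_OP_MSK (0x7f) recovers the operation. A standalone check of that relationship (MEI_HBM_CMD_RES_MSK is spelled out here for the demo; the 0x80 value follows from the opcode pairs above):

#include <assert.h>
#include <stdio.h>

#define MEI_HBM_CMD_OP_MSK             0x7f
#define MEI_HBM_CMD_RES_MSK            0x80

#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a
#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a
#define MEI_PG_ISOLATION_EXIT_REQ_CMD  0x0b
#define MEI_PG_ISOLATION_EXIT_RES_CMD  0x8b

int main(void)
{
	/* a response is the request opcode with the response bit set */
	assert((MEI_PG_ISOLATION_ENTRY_REQ_CMD | MEI_HBM_CMD_RES_MSK) ==
	       MEI_PG_ISOLATION_ENTRY_RES_CMD);
	/* masking a response with the opcode mask recovers the request */
	assert((MEI_PG_ISOLATION_EXIT_RES_CMD & MEI_HBM_CMD_OP_MSK) ==
	       MEI_PG_ISOLATION_EXIT_REQ_CMD);
	puts("PG opcodes follow the request/response convention");
	return 0;
}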
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -208,6 +220,17 @@ struct hbm_props_response {
} __packed;
/**
+ * struct hbm_power_gate - power gate request/response
+ *
+ * @hbm_cmd - bus message command header
+ * @reserved[3]
+ */
+struct hbm_power_gate {
+ u8 hbm_cmd;
+ u8 reserved[3];
+} __packed;
+
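hbm_power_gate carries no payload beyond the opcode: one command byte plus three reserved bytes, packed to exactly four bytes on the wire. A compile-time check of that layout:

#include <stdint.h>
#include <stdio.h>

struct hbm_power_gate {
	uint8_t hbm_cmd;
	uint8_t reserved[3];
} __attribute__((packed));	/* stands in for the kernel's __packed */

_Static_assert(sizeof(struct hbm_power_gate) == 4,
	       "PG entry/exit messages are 4 bytes on the wire");

int main(void)
{
	printf("sizeof(struct hbm_power_gate) = %zu\n",
	       sizeof(struct hbm_power_gate));
	return 0;
}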
+/**
* struct hbm_client_connect_request - connect/disconnect request
*
* @hbm_cmd - bus message command header
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 4460975c0eef..006929222481 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -74,9 +74,13 @@ int mei_reset(struct mei_device *dev)
if (state != MEI_DEV_INITIALIZING &&
state != MEI_DEV_DISABLED &&
state != MEI_DEV_POWER_DOWN &&
- state != MEI_DEV_POWER_UP)
- dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
- mei_dev_state_str(state));
+ state != MEI_DEV_POWER_UP) {
+ struct mei_fw_status fw_status;
+ mei_fw_status(dev, &fw_status);
+ dev_warn(&dev->pdev->dev,
+ "unexpected reset: dev_state = %s " FW_STS_FMT "\n",
+ mei_dev_state_str(state), FW_STS_PRM(fw_status));
+ }
/* we're already in reset, cancel the init timer
* if the reset was called due the hbm protocol error
@@ -118,8 +122,8 @@ int mei_reset(struct mei_device *dev)
mei_amthif_reset_params(dev);
}
+ mei_hbm_reset(dev);
- dev->me_clients_num = 0;
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
@@ -303,15 +307,58 @@ void mei_stop(struct mei_device *dev)
}
EXPORT_SYMBOL_GPL(mei_stop);
+/**
+ * mei_write_is_idle - check if the write queues are idle
+ *
+ * @dev: the device structure
+ *
+ * returns true if there is no pending write
+ */
+bool mei_write_is_idle(struct mei_device *dev)
+{
+ bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
+ list_empty(&dev->ctrl_wr_list.list) &&
+ list_empty(&dev->write_list.list));
+ dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n",
+ idle,
+ mei_dev_state_str(dev->dev_state),
+ list_empty(&dev->ctrl_wr_list.list),
+ list_empty(&dev->write_list.list));
-void mei_device_init(struct mei_device *dev)
+ return idle;
+}
+EXPORT_SYMBOL_GPL(mei_write_is_idle);
+
+int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status)
+{
+ int i;
+ const struct mei_fw_status *fw_src = &dev->cfg->fw_status;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = fw_src->count;
+ for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ fw_src->status[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_fw_status);
+
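mei_fw_status is the generic reader: it walks the per-generation offset list from dev->cfg, copies at most MEI_FW_STATUS_MAX registers and stops at the first failed config-space read. A user-space analogue with the PCI read stubbed out (the 0x40/0x48 offsets stand in for PCI_CFG_HFS_1/_2; the values are fabricated):

#include <stdint.h>
#include <stdio.h>

#define MEI_FW_STATUS_MAX 2

struct mei_fw_status {
	int count;
	uint32_t status[MEI_FW_STATUS_MAX];
};

/* stand-in for pci_read_config_dword(); always succeeds here */
static int read_cfg_dword(uint32_t offset, uint32_t *val)
{
	*val = 0xa0000000u | offset;	/* fabricated register value */
	return 0;
}

static int fw_status_read(const struct mei_fw_status *src,
			  struct mei_fw_status *dst)
{
	int i;

	if (!dst)
		return -1;

	dst->count = src->count;
	for (i = 0; i < src->count && i < MEI_FW_STATUS_MAX; i++) {
		int ret = read_cfg_dword(src->status[i], &dst->status[i]);

		if (ret)
			return ret;	/* stop at the first failed read */
	}
	return 0;
}

int main(void)
{
	/* src->status[] holds config-space offsets, as in struct mei_cfg */
	const struct mei_fw_status cfg = { .count = 2, .status = { 0x40, 0x48 } };
	struct mei_fw_status out;

	if (!fw_status_read(&cfg, &out))
		printf("fw status: %08X %08X\n",
		       (unsigned)out.status[0], (unsigned)out.status[1]);
	return 0;
}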
+void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg)
{
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
INIT_LIST_HEAD(&dev->device_list);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_hw_ready);
+ init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
@@ -340,6 +387,9 @@ void mei_device_init(struct mei_device *dev)
* 0: Reserved for MEI Bus Message communications
*/
bitmap_set(dev->host_clients_map, 0, 1);
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ dev->cfg = cfg;
}
EXPORT_SYMBOL_GPL(mei_device_init);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 147413145c97..66f0a1a06451 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -467,7 +467,6 @@ static int mei_ioctl_connect_client(struct file *file,
}
cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
cl->me_client_id);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 94a516716d22..5c7e990e2f22 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -153,6 +153,20 @@ struct mei_msg_data {
unsigned char *data;
};
+/* Maximum number of processed FW status registers */
+#define MEI_FW_STATUS_MAX 2
+
+/*
+ * struct mei_fw_status - storage of FW status data
+ *
+ * @count - number of actually available elements in array
+ * @status - FW status registers
+ */
+struct mei_fw_status {
+ int count;
+ u32 status[MEI_FW_STATUS_MAX];
+};
+
/**
* struct mei_me_client - representation of me (fw) client
*
@@ -213,6 +227,7 @@ struct mei_cl {
/** struct mei_hw_ops
*
+ * @fw_status - read FW status from PCI config space
* @host_is_ready - query for host readiness
* @hw_is_ready - query if hw is ready
@@ -220,6 +235,9 @@ struct mei_cl {
* @hw_start - start hw after reset
* @hw_config - configure hw
+ * @pg_state - power gating state of the device
+ * @pg_is_enabled - is power gating enabled
+
* @intr_clear - clear pending interrupts
* @intr_enable - enable interrupts
* @intr_disable - disable interrupts
@@ -237,6 +255,8 @@ struct mei_cl {
*/
struct mei_hw_ops {
+ int (*fw_status)(struct mei_device *dev,
+ struct mei_fw_status *fw_status);
bool (*host_is_ready)(struct mei_device *dev);
bool (*hw_is_ready)(struct mei_device *dev);
@@ -244,6 +264,9 @@ struct mei_hw_ops {
int (*hw_start)(struct mei_device *dev);
void (*hw_config)(struct mei_device *dev);
+ enum mei_pg_state (*pg_state)(struct mei_device *dev);
+ bool (*pg_is_enabled)(struct mei_device *dev);
+
void (*intr_clear)(struct mei_device *dev);
void (*intr_enable)(struct mei_device *dev);
void (*intr_disable)(struct mei_device *dev);
@@ -331,16 +354,61 @@ struct mei_cl_device {
void *priv_data;
};
+
+/**
+ * enum mei_pg_event - power gating transition events
+ *
+ * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
+ * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
+ * @MEI_PG_EVENT_RECEIVED: the driver received pg event
+ */
+enum mei_pg_event {
+ MEI_PG_EVENT_IDLE,
+ MEI_PG_EVENT_WAIT,
+ MEI_PG_EVENT_RECEIVED,
+};
+
+/**
+ * enum mei_pg_state - device internal power gating state
+ *
+ * @MEI_PG_OFF: device is not power gated - it is active
+ * @MEI_PG_ON: device is power gated - it is in lower power state
+ */
+enum mei_pg_state {
+ MEI_PG_OFF = 0,
+ MEI_PG_ON = 1,
+};
+
+/*
+ * mei_cfg
+ *
+ * @fw_status - FW status
+ * @quirk_probe - device exclusion quirk
+ */
+struct mei_cfg {
+ const struct mei_fw_status fw_status;
+ bool (*quirk_probe)(struct pci_dev *pdev);
+};
+
+
+#define MEI_PCI_DEVICE(dev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+
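MEI_PCI_DEVICE stashes a pointer to the per-generation mei_cfg in the ID table's driver_data, and probe() later casts it back. A reduced, compilable sketch of that round trip (the structs and the device ID are simplified stand-ins, not the real definitions):

#include <stdio.h>

typedef unsigned long kernel_ulong_t;

/* simplified stand-in for the real struct mei_cfg */
struct mei_cfg {
	const char *name;
};

/* simplified stand-in for struct pci_device_id */
struct pci_device_id {
	unsigned int device;
	kernel_ulong_t driver_data;
};

static const struct mei_cfg pch_cfg = { .name = "pch" };

#define MEI_PCI_DEVICE(dev, cfg) \
	.device = (dev), .driver_data = (kernel_ulong_t)&(cfg)

static const struct pci_device_id tbl[] = {
	{ MEI_PCI_DEVICE(0x1234, pch_cfg) },	/* hypothetical device ID */
	{ 0, }
};

int main(void)
{
	/* probe() recovers the per-generation config from driver_data */
	const struct mei_cfg *cfg = (const struct mei_cfg *)tbl[0].driver_data;

	printf("device %04x -> cfg \"%s\"\n", tbl[0].device, cfg->name);
	return 0;
}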
/**
* struct mei_device - MEI private device struct
* @reset_count - limits the number of consecutive resets
* @hbm_state - state of host bus message protocol
+ * @pg_event - power gating event
* @mem_addr - mem mapped base register address
* @hbuf_depth - depth of hardware host/write buffer in slots
* @hbuf_is_ready - query if the host/write buffer is ready
* @wr_msg - the buffer for hbm control messages
+ * @cfg - per device generation config and ops
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -371,6 +439,7 @@ struct mei_device {
* waiting queue for receive message from FW
*/
wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_pg;
wait_queue_head_t wait_recvd_msg;
wait_queue_head_t wait_stop_wd;
@@ -382,6 +451,14 @@ struct mei_device {
enum mei_hbm_state hbm_state;
u16 init_clients_timer;
+ /*
+ * Power Gating support
+ */
+ enum mei_pg_event pg_event;
+#ifdef CONFIG_PM_RUNTIME
+ struct dev_pm_domain pg_domain;
+#endif /* CONFIG_PM_RUNTIME */
+
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
@@ -442,6 +519,7 @@ struct mei_device {
const struct mei_hw_ops *ops;
+ const struct mei_cfg *cfg;
char hw[0] __aligned(sizeof(void *));
};
@@ -474,7 +552,7 @@ static inline u32 mei_slots2data(int slots)
/*
* mei init function prototypes
*/
-void mei_device_init(struct mei_device *dev);
+void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
int mei_restart(struct mei_device *dev);
@@ -553,10 +631,22 @@ void mei_watchdog_unregister(struct mei_device *dev);
* Register Access Function
*/
+
static inline void mei_hw_config(struct mei_device *dev)
{
dev->ops->hw_config(dev);
}
+
+static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
+{
+ return dev->ops->pg_state(dev);
+}
+
+static inline bool mei_pg_is_enabled(struct mei_device *dev)
+{
+ return dev->ops->pg_is_enabled(dev);
+}
+
static inline int mei_hw_reset(struct mei_device *dev, bool enable)
{
return dev->ops->hw_reset(dev, enable);
@@ -629,8 +719,17 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status);
+
+#define FW_STS_FMT "%08X %08X"
+#define FW_STS_PRM(fw_status) \
+ (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \
+ (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF
+
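FW_STS_PRM deliberately tolerates configurations with fewer than two status registers by substituting the 0xDEADBEEF canary, so legacy devices never index past count. A runnable demonstration (register contents are made up):

#include <stdint.h>
#include <stdio.h>

#define MEI_FW_STATUS_MAX 2

struct mei_fw_status {
	int count;
	uint32_t status[MEI_FW_STATUS_MAX];
};

#define FW_STS_FMT "%08X %08X"
#define FW_STS_PRM(fw_status) \
	(fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \
	(fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF

int main(void)
{
	/* fabricated register contents */
	struct mei_fw_status two = { .count = 2, .status = { 0x1e000245, 0x30002306 } };
	/* a legacy device with fw_status.count == 0 */
	struct mei_fw_status none = { .count = 0 };

	printf("two regs: " FW_STS_FMT "\n", FW_STS_PRM(two));
	printf("no regs:  " FW_STS_FMT "\n", FW_STS_PRM(none)); /* canaries */
	return 0;
}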
bool mei_hbuf_acquire(struct mei_device *dev);
+bool mei_write_is_idle(struct mei_device *dev);
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
int mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 95889e2e31ff..1b46c64a649f 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -33,6 +33,8 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+
#include <linux/mei.h>
#include "mei_dev.h"
@@ -42,42 +44,44 @@
/* mei_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
/* required last entry */
{0, }
@@ -85,44 +89,33 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
+#ifdef CONFIG_PM_RUNTIME
+static inline void mei_me_set_pm_domain(struct mei_device *dev);
+static inline void mei_me_unset_pm_domain(struct mei_device *dev);
+#else
+static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
+static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
+#endif /* CONFIG_PM_RUNTIME */
+
/**
* mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
* @pdev: PCI device structure
- * @ent: entry into pci_device_table
+ * @cfg: per generation config
*
* returns true if ME Interface is valid, false otherwise
*/
static bool mei_me_quirk_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct mei_cfg *cfg)
{
- u32 reg;
- /* Cougar Point || Patsburg */
- if (ent->device == MEI_DEV_ID_CPT_1 ||
- ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
- /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
- if ((reg & 0x600) == 0x200)
- goto no_mei;
- }
-
- /* Lynx Point */
- if (ent->device == MEI_DEV_ID_LPT_H ||
- ent->device == MEI_DEV_ID_LPT_W ||
- ent->device == MEI_DEV_ID_LPT_HR) {
- /* Read ME FW Status check for SPS Firmware */
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
- /* if bits [19:16] = 15, running SPS Firmware */
- if ((reg & 0xf0000) == 0xf0000)
- goto no_mei;
+ if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
}
return true;
-
-no_mei:
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
}
+
/**
* mei_probe - Device Initialization Routine
*
@@ -133,15 +126,14 @@ no_mei:
*/
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
struct mei_device *dev;
struct mei_me_hw *hw;
int err;
- if (!mei_me_quirk_probe(pdev, ent)) {
- err = -ENODEV;
- goto end;
- }
+ if (!mei_me_quirk_probe(pdev, cfg))
+ return -ENODEV;
/* enable pci dev */
err = pci_enable_device(pdev);
@@ -173,7 +165,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev);
+ dev = mei_me_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
goto release_regions;
@@ -212,6 +204,9 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto release_irq;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
err = mei_register(dev);
if (err)
goto release_irq;
@@ -220,6 +215,17 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
schedule_delayed_work(&dev->timer_work, HZ);
+ /*
+ * For non-wakeable HW the runtime PM framework
+ * can't be used at the PCI device level.
+ * Use domain runtime pm callbacks instead.
+ */
+ if (!pci_dev_run_wake(pdev))
+ mei_me_set_pm_domain(dev);
+
+ if (mei_pg_is_enabled(dev))
+ pm_runtime_put_noidle(&pdev->dev);
+
dev_dbg(&pdev->dev, "initialization successful.\n");
return 0;
@@ -259,12 +265,18 @@ static void mei_me_remove(struct pci_dev *pdev)
if (!dev)
return;
+ if (mei_pg_is_enabled(dev))
+ pm_runtime_get_noresume(&pdev->dev);
+
hw = to_me_hw(dev);
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
+ if (!pci_dev_run_wake(pdev))
+ mei_me_unset_pm_domain(dev);
+
/* disable interrupts */
mei_disable_interrupts(dev);
@@ -343,12 +355,120 @@ static int mei_me_pci_resume(struct device *device)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int mei_me_pm_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+ pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);
+
+ return -EBUSY;
+}
+
+static int mei_me_pm_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (mei_write_is_idle(dev))
+ ret = mei_me_pg_set_sync(dev);
+ else
+ ret = -EAGAIN;
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+
+ return ret;
+}
+
+static int mei_me_pm_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ ret = mei_me_pg_unset_sync(dev);
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_me_set_pm_domain - fill and set pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_set_pm_domain(struct mei_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev->dev.bus && pdev->dev.bus->pm) {
+ dev->pg_domain.ops = *pdev->dev.bus->pm;
+
+ dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
+ dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
+ dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;
+
+ pdev->dev.pm_domain = &dev->pg_domain;
+ }
+}
+
+/**
+ * mei_me_unset_pm_domain - clean pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_unset_pm_domain(struct mei_device *dev)
+{
+ /* stop using pm callbacks if any */
+ dev->pdev->dev.pm_domain = NULL;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
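mei_me_set_pm_domain copies the bus's dev_pm_ops wholesale and then overrides only the three runtime callbacks, so every other PM entry point still reaches the bus implementation. The same copy-then-override pattern in a standalone sketch (the ops struct is a simplified stand-in for dev_pm_ops):

#include <stdio.h>

struct pm_ops {
	int (*runtime_suspend)(void);
	int (*runtime_resume)(void);
	int (*runtime_idle)(void);
	int (*prepare)(void);	/* stands in for the rest of dev_pm_ops */
};

static int bus_prepare(void)    { puts("bus prepare");       return 0; }
static int bus_rt_suspend(void) { puts("bus rt suspend");    return 0; }
static int drv_rt_suspend(void) { puts("driver rt suspend"); return 0; }
static int drv_rt_resume(void)  { puts("driver rt resume");  return 0; }
static int drv_rt_idle(void)    { puts("driver rt idle");    return 0; }

static const struct pm_ops bus_pm = {
	.runtime_suspend = bus_rt_suspend,
	.prepare = bus_prepare,
};

int main(void)
{
	/* start from the bus ops, then override just the runtime hooks */
	struct pm_ops domain = bus_pm;

	domain.runtime_suspend = drv_rt_suspend;
	domain.runtime_resume = drv_rt_resume;
	domain.runtime_idle = drv_rt_idle;

	domain.prepare();		/* still the bus implementation */
	domain.runtime_suspend();	/* now the driver's */
	return 0;
}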
+#ifdef CONFIG_PM
+static const struct dev_pm_ops mei_me_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
+ mei_me_pci_resume)
+ SET_RUNTIME_PM_OPS(
+ mei_me_pm_runtime_suspend,
+ mei_me_pm_runtime_resume,
+ mei_me_pm_runtime_idle)
+};
-static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index ad3adb009a1e..2343c6236df9 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -27,6 +27,7 @@
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -35,11 +36,18 @@
#include "hw-txe.h"
static const struct pci_device_id mei_txe_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */
+ {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
+#ifdef CONFIG_PM_RUNTIME
+static inline void mei_txe_set_pm_domain(struct mei_device *dev);
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
+#else
+static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
+#endif /* CONFIG_PM_RUNTIME */
static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
@@ -61,6 +69,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
*/
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
struct mei_device *dev;
struct mei_txe_hw *hw;
int err;
@@ -91,7 +100,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_txe_dev_init(pdev);
+ dev = mei_txe_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
goto release_regions;
@@ -137,12 +146,25 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto release_irq;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
err = mei_register(dev);
if (err)
goto release_irq;
pci_set_drvdata(pdev, dev);
+ /*
+ * For non-wakeable HW the runtime PM framework
+ * can't be used at the PCI device level.
+ * Use domain runtime pm callbacks instead.
+ */
+ if (!pci_dev_run_wake(pdev))
+ mei_txe_set_pm_domain(dev);
+
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
release_irq:
@@ -187,10 +209,15 @@ static void mei_txe_remove(struct pci_dev *pdev)
return;
}
+ pm_runtime_get_noresume(&pdev->dev);
+
hw = to_txe_hw(dev);
mei_stop(dev);
+ if (!pci_dev_run_wake(pdev))
+ mei_txe_unset_pm_domain(dev);
+
/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -265,15 +292,131 @@ static int mei_txe_pci_resume(struct device *device)
return err;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int mei_txe_pm_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+ pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2);
+
+ return -EBUSY;
+}
+static int mei_txe_pm_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (mei_write_is_idle(dev))
+ ret = mei_txe_aliveness_set_sync(dev, 0);
+ else
+ ret = -EAGAIN;
+
+ /*
+ * If everything is okay we're about to enter the PCI low
+ * power state (D3), therefore we need to disable the
+ * interrupts towards the host.
+ * However, if the device is not wakeable we do not enter
+ * the D-low state and we need to keep the interrupts kicking.
+ */
+ if (!ret && pci_dev_run_wake(pdev))
+ mei_disable_interrupts(dev);
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
+
+ mutex_unlock(&dev->device_lock);
+ return ret;
+}
+
+static int mei_txe_pm_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ mei_enable_interrupts(dev);
+
+ ret = mei_txe_aliveness_set_sync(dev, 1);
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_txe_set_pm_domain - fill and set pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_txe_set_pm_domain(struct mei_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev->dev.bus && pdev->dev.bus->pm) {
+ dev->pg_domain.ops = *pdev->dev.bus->pm;
+
+ dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
+ dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
+ dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
+
+ pdev->dev.pm_domain = &dev->pg_domain;
+ }
+}
-static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops,
- mei_txe_pci_suspend,
- mei_txe_pci_resume);
+/**
+ * mei_txe_unset_pm_domain - clean pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
+{
+ /* stop using pm callbacks if any */
+ dev->pdev->dev.pm_domain = NULL;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops mei_txe_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
+ mei_txe_pci_resume)
+ SET_RUNTIME_PM_OPS(
+ mei_txe_pm_runtime_suspend,
+ mei_txe_pm_runtime_resume,
+ mei_txe_pm_runtime_idle)
+};
#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index ebf1cbc198fd..a84a664dfccb 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -84,8 +84,6 @@ int mei_wd_host_init(struct mei_device *dev)
return ret;
}
- cl->state = MEI_FILE_CONNECTING;
-
ret = mei_cl_connect(cl, NULL);
if (ret) {
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
new file mode 100644
index 000000000000..73068e50e56d
--- /dev/null
+++ b/drivers/misc/vexpress-syscfg.c
@@ -0,0 +1,324 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/vexpress.h>
+
+
+#define SYS_CFGDATA 0x0
+
+#define SYS_CFGCTRL 0x4
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x8
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+
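SYS_CFGCTRL transactions are built by OR-ing these bit fields and setting START (plus WRITE for writes), which is exactly what vexpress_syscfg_exec does with the pre-built templates below. A compilable sketch of the encoding (the dcc/site/function/device values are hypothetical):

#include <stdio.h>

#define SYS_CFGCTRL_START	(1u << 31)
#define SYS_CFGCTRL_WRITE	(1u << 30)
#define SYS_CFGCTRL_DCC(n)	(((n) & 0xfu) << 26)
#define SYS_CFGCTRL_FUNC(n)	(((n) & 0x3fu) << 20)
#define SYS_CFGCTRL_SITE(n)	(((n) & 0x3u) << 16)
#define SYS_CFGCTRL_POSITION(n)	(((n) & 0xfu) << 12)
#define SYS_CFGCTRL_DEVICE(n)	(((n) & 0xfffu) << 0)

int main(void)
{
	/* hypothetical template: dcc 0, site 1, position 0, func 1, device 0 */
	unsigned int cmd = SYS_CFGCTRL_DCC(0) |
			   SYS_CFGCTRL_SITE(1) |
			   SYS_CFGCTRL_POSITION(0) |
			   SYS_CFGCTRL_FUNC(1) |
			   SYS_CFGCTRL_DEVICE(0);

	/* the driver sets START (and WRITE for writes) before kicking off */
	printf("read  command: %08x\n", cmd | SYS_CFGCTRL_START);
	printf("write command: %08x\n",
	       cmd | SYS_CFGCTRL_START | SYS_CFGCTRL_WRITE);
	return 0;
}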
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[0]; /* Keep it last! */
+};
+
+
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
+{
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
+
+ if (WARN_ON(index > func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
+ }
+
+ return 0;
+}
+
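The completion poll above sleeps between status reads, shrinking the interval from an initial 100us down to a 20us floor, and gives up after 100 tries. A quick standalone calculation of the worst-case wait that schedule implies (about 2.2ms):

#include <stdio.h>

int main(void)
{
	long timeout = 100;	/* us: initial poll interval */
	int tries = 100;
	long total = 0;

	do {
		total += timeout;	/* time slept before each status read */
		if (timeout > 20)
			timeout -= 20;	/* tighten toward the 20us floor */
	} while (--tries);

	printf("worst case: ~%ld us before -ETIMEDOUT\n", total);
	return 0;
}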
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ if (dev->of_node) {
+ int err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+ } else {
+ if (pdev->num_resources != 1 ||
+ pdev->resource[0].flags != IORESOURCE_BUS)
+ return ERR_PTR(-EFAULT);
+
+ site = pdev->resource[0].start;
+ if (site == VEXPRESS_SITE_MASTER)
+ site = vexpress_config_get_master();
+ position = 0;
+ dcc = 0;
+ num = 1;
+ }
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only; now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
+ GFP_KERNEL);
+ if (!func)
+ return NULL;
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ if (dev->of_node) {
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+ } else {
+ function = pdev->resource[0].end;
+ device = pdev->id;
+ }
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
+ }
+
+ vexpress_syscfg_regmap_config.max_register = num - 1;
+
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
+
+ if (IS_ERR(func->regmap))
+ kfree(func);
+ else
+ list_add(&func->list, &syscfg->funcs);
+
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
+{
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
+
+ regmap_exit(regmap);
+
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+ list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
+}
+
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+/* Non-DT hack, to be gone... */
+static struct device *vexpress_syscfg_bridge;
+
+int vexpress_syscfg_device_register(struct platform_device *pdev)
+{
+ pdev->dev.parent = vexpress_syscfg_bridge;
+
+ return platform_device_register(pdev);
+}
+
+
+int vexpress_syscfg_probe(struct platform_device *pdev)
+{
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct device *bridge;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!syscfg->base)
+ return -EFAULT;
+
+ /* Must use dev.parent (MFD), as that's where the DT phandle points... */
+ bridge = vexpress_config_bridge_register(pdev->dev.parent,
+ &vexpress_syscfg_bridge_ops, syscfg);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ /* Non-DT case */
+ if (!pdev->dev.of_node)
+ vexpress_syscfg_bridge = bridge;
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+
+static int __init vexpress_syscfg_init(void)
+{
+ return platform_driver_register(&vexpress_syscfg_driver);
+}
+core_initcall(vexpress_syscfg_init);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 771c60ab4a32..a084edd37af5 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -23,6 +24,7 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
@@ -364,7 +366,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
- struct mmci_platform_data *plat = host->plat;
const char *rxname, *txname;
dma_cap_mask_t mask;
@@ -378,25 +379,6 @@ static void mmci_dma_setup(struct mmci_host *host)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- if (plat && plat->dma_filter) {
- if (!host->dma_rx_channel && plat->dma_rx_param) {
- host->dma_rx_channel = dma_request_channel(mask,
- plat->dma_filter,
- plat->dma_rx_param);
- /* E.g if no DMA hardware is present */
- if (!host->dma_rx_channel)
- dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
- }
-
- if (!host->dma_tx_channel && plat->dma_tx_param) {
- host->dma_tx_channel = dma_request_channel(mask,
- plat->dma_filter,
- plat->dma_tx_param);
- if (!host->dma_tx_channel)
- dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
- }
- }
-
/*
* If only an RX channel is specified, the driver will
* attempt to use it bidirectionally, however if it is
@@ -444,11 +426,9 @@ static void mmci_dma_setup(struct mmci_host *host)
*/
static inline void mmci_dma_release(struct mmci_host *host)
{
- struct mmci_platform_data *plat = host->plat;
-
if (host->dma_rx_channel)
dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel && plat->dma_tx_param)
+ if (host->dma_tx_channel)
dma_release_channel(host->dma_tx_channel);
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
@@ -1285,7 +1265,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* indicating signal direction for the signals in
* the SD/MMC bus and feedback-clock usage.
*/
- pwr |= host->plat->sigdir;
+ pwr |= host->pwr_reg_add;
if (ios->bus_width == MMC_BUS_WIDTH_4)
pwr &= ~MCI_ST_DATA74DIREN;
@@ -1326,35 +1306,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
pm_runtime_put_autosuspend(mmc_dev(mmc));
}
-static int mmci_get_ro(struct mmc_host *mmc)
-{
- struct mmci_host *host = mmc_priv(mmc);
-
- if (host->gpio_wp == -ENOSYS)
- return -ENOSYS;
-
- return gpio_get_value_cansleep(host->gpio_wp);
-}
-
static int mmci_get_cd(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmci_platform_data *plat = host->plat;
- unsigned int status;
+ unsigned int status = mmc_gpio_get_cd(mmc);
- if (host->gpio_cd == -ENOSYS) {
+ if (status == -ENOSYS) {
if (!plat->status)
return 1; /* Assume always present */
status = plat->status(mmc_dev(host->mmc));
- } else
- status = !!gpio_get_value_cansleep(host->gpio_cd)
- ^ plat->cd_invert;
-
- /*
- * Use positive logic throughout - status is zero for no card,
- * non-zero for card inserted.
- */
+ }
return status;
}
@@ -1391,70 +1354,44 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
-static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
-{
- struct mmci_host *host = dev_id;
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(500));
-
- return IRQ_HANDLED;
-}
-
static struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.pre_req = mmci_pre_request,
.post_req = mmci_post_request,
.set_ios = mmci_set_ios,
- .get_ro = mmci_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.get_cd = mmci_get_cd,
.start_signal_voltage_switch = mmci_sig_volt_switch,
};
-#ifdef CONFIG_OF
-static void mmci_dt_populate_generic_pdata(struct device_node *np,
- struct mmci_platform_data *pdata)
+static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
- int bus_width = 0;
-
- pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
- pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = mmc_of_parse(mmc);
- if (of_get_property(np, "cd-inverted", NULL))
- pdata->cd_invert = true;
- else
- pdata->cd_invert = false;
+ if (ret)
+ return ret;
- of_property_read_u32(np, "max-frequency", &pdata->f_max);
- if (!pdata->f_max)
- pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+ if (of_get_property(np, "st,sig-dir-dat0", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA0DIREN;
+ if (of_get_property(np, "st,sig-dir-dat2", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA2DIREN;
+ if (of_get_property(np, "st,sig-dir-dat31", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA31DIREN;
+ if (of_get_property(np, "st,sig-dir-dat74", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA74DIREN;
+ if (of_get_property(np, "st,sig-dir-cmd", NULL))
+ host->pwr_reg_add |= MCI_ST_CMDDIREN;
+ if (of_get_property(np, "st,sig-pin-fbclk", NULL))
+ host->pwr_reg_add |= MCI_ST_FBCLKEN;
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
- pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
- pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
- of_property_read_u32(np, "bus-width", &bus_width);
- switch (bus_width) {
- case 0 :
- /* No bus-width supplied. */
- break;
- case 4 :
- pdata->capabilities |= MMC_CAP_4_BIT_DATA;
- break;
- case 8 :
- pdata->capabilities |= MMC_CAP_8_BIT_DATA;
- break;
- default :
- pr_warn("%s: Unsupported bus width\n", np->full_name);
- }
-}
-#else
-static void mmci_dt_populate_generic_pdata(struct device_node *np,
- struct mmci_platform_data *pdata)
-{
- return;
+ return 0;
}
-#endif
static int mmci_probe(struct amba_device *dev,
const struct amba_id *id)
@@ -1478,26 +1415,17 @@ static int mmci_probe(struct amba_device *dev,
return -ENOMEM;
}
- if (np)
- mmci_dt_populate_generic_pdata(np, plat);
+ mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
+ if (!mmc)
+ return -ENOMEM;
- ret = amba_request_regions(dev, DRIVER_NAME);
+ ret = mmci_of_parse(np, mmc);
if (ret)
- goto out;
-
- mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto rel_regions;
- }
+ goto host_free;
host = mmc_priv(mmc);
host->mmc = mmc;
- host->gpio_wp = -ENOSYS;
- host->gpio_cd = -ENOSYS;
- host->gpio_cd_irq = -1;
-
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1529,10 +1457,11 @@ static int mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+
host->phybase = dev->res.start;
- host->base = ioremap(dev->res.start, resource_size(&dev->res));
- if (!host->base) {
- ret = -ENOMEM;
+ host->base = devm_ioremap_resource(&dev->dev, &dev->res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto clk_disable;
}
@@ -1546,15 +1475,13 @@ static int mmci_probe(struct amba_device *dev,
else
mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
/*
- * If the platform data supplies a maximum operating
- * frequency, this takes precedence. Else, we fall back
- * to using the module parameter, which has a (low)
- * default value in case it is not specified. Either
- * value must not exceed the clock rate into the block,
- * of course.
+ * If no maximum operating frequency is supplied, fall back to use
+ * the module parameter, which has a (low) default value in case it
+ * is not specified. Either value must not exceed the clock rate into
+ * the block, of course.
*/
- if (plat->f_max)
- mmc->f_max = min(host->mclk, plat->f_max);
+ if (mmc->f_max)
+ mmc->f_max = min(host->mclk, mmc->f_max);
else
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
@@ -1566,8 +1493,15 @@ static int mmci_probe(struct amba_device *dev,
else if (plat->ocr_mask)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
- mmc->caps = plat->capabilities;
- mmc->caps2 = plat->capabilities2;
+ /* DT takes precedence over platform data. */
+ if (!np) {
+ if (!plat->cd_invert)
+ mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* We support these capabilities. */
+ mmc->caps |= MMC_CAP_CMD23;
if (variant->busy_detect) {
mmci_ops.card_busy = mmci_card_busy;
@@ -1579,7 +1513,7 @@ static int mmci_probe(struct amba_device *dev,
mmc->ops = &mmci_ops;
/* We support these PM capabilities. */
- mmc->pm_caps = MMC_PM_KEEP_POWER;
+ mmc->pm_caps |= MMC_PM_KEEP_POWER;
/*
* We can do SGIO
@@ -1616,62 +1550,30 @@ static int mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
- if (plat->gpio_cd == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_gpio_cd;
- }
- if (gpio_is_valid(plat->gpio_cd)) {
- ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
- if (ret == 0)
- ret = gpio_direction_input(plat->gpio_cd);
- if (ret == 0)
- host->gpio_cd = plat->gpio_cd;
- else if (ret != -ENOSYS)
- goto err_gpio_cd;
-
- /*
- * A gpio pin that will detect cards when inserted and removed
- * will most likely want to trigger on the edges if it is
- * 0 when ejected and 1 when inserted (or mutatis mutandis
- * for the inverted case) so we request triggers on both
- * edges.
- */
- ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
- mmci_cd_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRIVER_NAME " (cd)", host);
- if (ret >= 0)
- host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
- }
- if (plat->gpio_wp == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_gpio_wp;
+ /* If DT, cd/wp gpios must be supplied through it. */
+ if (!np && gpio_is_valid(plat->gpio_cd)) {
+ ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
+ if (ret)
+ goto clk_disable;
}
- if (gpio_is_valid(plat->gpio_wp)) {
- ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
- if (ret == 0)
- ret = gpio_direction_input(plat->gpio_wp);
- if (ret == 0)
- host->gpio_wp = plat->gpio_wp;
- else if (ret != -ENOSYS)
- goto err_gpio_wp;
+ if (!np && gpio_is_valid(plat->gpio_wp)) {
+ ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
+ if (ret)
+ goto clk_disable;
}
- if ((host->plat->status || host->gpio_cd != -ENOSYS)
- && host->gpio_cd_irq < 0)
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
+ ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
- goto unmap;
+ goto clk_disable;
if (!dev->irq[1])
host->singleirq = true;
else {
- ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
- DRIVER_NAME " (pio)", host);
+ ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
+ IRQF_SHARED, DRIVER_NAME " (pio)", host);
if (ret)
- goto irq0_free;
+ goto clk_disable;
}
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
@@ -1693,25 +1595,10 @@ static int mmci_probe(struct amba_device *dev,
return 0;
- irq0_free:
- free_irq(dev->irq[0], host);
- unmap:
- if (host->gpio_wp != -ENOSYS)
- gpio_free(host->gpio_wp);
- err_gpio_wp:
- if (host->gpio_cd_irq >= 0)
- free_irq(host->gpio_cd_irq, host);
- if (host->gpio_cd != -ENOSYS)
- gpio_free(host->gpio_cd);
- err_gpio_cd:
- iounmap(host->base);
clk_disable:
clk_disable_unprepare(host->clk);
host_free:
mmc_free_host(mmc);
- rel_regions:
- amba_release_regions(dev);
- out:
return ret;
}
@@ -1737,92 +1624,46 @@ static int mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCIDATACTRL);
mmci_dma_release(host);
- free_irq(dev->irq[0], host);
- if (!host->singleirq)
- free_irq(dev->irq[1], host);
-
- if (host->gpio_wp != -ENOSYS)
- gpio_free(host->gpio_wp);
- if (host->gpio_cd_irq >= 0)
- free_irq(host->gpio_cd_irq, host);
- if (host->gpio_cd != -ENOSYS)
- gpio_free(host->gpio_cd);
-
- iounmap(host->base);
clk_disable_unprepare(host->clk);
-
mmc_free_host(mmc);
-
- amba_release_regions(dev);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_SUSPEND
-static int mmci_suspend(struct device *dev)
-{
- struct amba_device *adev = to_amba_device(dev);
- struct mmc_host *mmc = amba_get_drvdata(adev);
-
- if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
- pm_runtime_get_sync(dev);
- writel(0, host->base + MMCIMASK0);
}
return 0;
}
-static int mmci_resume(struct device *dev)
-{
- struct amba_device *adev = to_amba_device(dev);
- struct mmc_host *mmc = amba_get_drvdata(adev);
-
- if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
- pm_runtime_put(dev);
- }
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
unsigned long flags;
- if (host->variant->pwrreg_nopower) {
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
- writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK0);
+ if (host->variant->pwrreg_nopower) {
writel(0, host->base + MMCIDATACTRL);
writel(0, host->base + MMCIPOWER);
writel(0, host->base + MMCICLOCK);
- mmci_reg_delay(host);
-
- spin_unlock_irqrestore(&host->lock, flags);
}
+ mmci_reg_delay(host);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_restore(struct mmci_host *host)
{
unsigned long flags;
- if (host->variant->pwrreg_nopower) {
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->variant->pwrreg_nopower) {
writel(host->clk_reg, host->base + MMCICLOCK);
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
writel(host->pwr_reg, host->base + MMCIPOWER);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
- mmci_reg_delay(host);
-
- spin_unlock_irqrestore(&host->lock, flags);
}
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
@@ -1857,8 +1698,9 @@ static int mmci_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
- SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static struct amba_id mmci_ids[] = {
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 58b1b8896bf2..347d942d740b 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -13,6 +13,16 @@
#define MCI_PWR_ON 0x03
#define MCI_OD (1 << 6)
#define MCI_ROD (1 << 7)
+/*
+ * The ST Micro version does not have ROD and reuse the voltage registers for
+ * direction settings.
+ */
+#define MCI_ST_DATA2DIREN (1 << 2)
+#define MCI_ST_CMDDIREN (1 << 3)
+#define MCI_ST_DATA0DIREN (1 << 4)
+#define MCI_ST_DATA31DIREN (1 << 5)
+#define MCI_ST_FBCLKEN (1 << 7)
+#define MCI_ST_DATA74DIREN (1 << 8)
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
@@ -176,9 +186,6 @@ struct mmci_host {
struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
- int gpio_cd;
- int gpio_wp;
- int gpio_cd_irq;
bool singleirq;
spinlock_t lock;
@@ -186,6 +193,7 @@ struct mmci_host {
unsigned int mclk;
unsigned int cclk;
u32 pwr_reg;
+ u32 pwr_reg_add;
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0b2ccb68c0d0..4dbfaee9aa95 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
-
- buf = req->buffer;
+ buf = bio_data(req->bio);
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 8d659e6a1b4c..20a667c95da4 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
* flash access anyway.
*/
mutex_lock(&dev->dev_mutex);
- ret = ubiblock_read(dev, req->buffer, sec, len);
+ ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
mutex_unlock(&dev->dev_mutex);
return ret;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
}
/* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
bond->alb_info.rlb_promisc_timeout_counter = 0;
- alb_send_learning_packets(bond->curr_active_slave, addr);
+ alb_send_learning_packets(bond->curr_active_slave, addr, true);
}
/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
- u16 vid)
+ __be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->dev = slave->dev;
if (vid) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+ skb = vlan_put_tag(skb, vlan_proto, vid);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
dev_queue_xmit(skb);
}
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
/* send untagged */
- alb_send_lp_vid(slave, mac_addr, 0);
+ alb_send_lp_vid(slave, mac_addr, 0, 0);
/* loop through vlans and send one packet for each */
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper->priv_flags & IFF_802_1Q_VLAN)
- alb_send_lp_vid(slave, mac_addr,
- vlan_dev_vlan_id(upper));
+ if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+ if (strict_match &&
+ ether_addr_equal_64bits(mac_addr,
+ upper->dev_addr)) {
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ } else if (!strict_match) {
+ alb_send_lp_vid(slave, upper->dev_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ }
+ }
}
rcu_read_unlock();
}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
/* fasten the change in the switch */
if (SLAVE_IS_OK(slave1)) {
- alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+ alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
}
if (SLAVE_IS_OK(slave2)) {
- alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+ alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
/* send learning packets */
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+ bool strict_match;
+
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave_rcu(bond, slave, iter)
- alb_send_learning_packets(slave, slave->dev->dev_addr);
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ /* If updating current_active, use all currently
+ * user mac addreses (!strict_match). Otherwise, only
+ * use mac of the slave device.
+ */
+ strict_match = (slave != bond->curr_active_slave);
+ alb_send_learning_packets(slave, slave->dev->dev_addr,
+ strict_match);
+ }
read_unlock(&bond->curr_slave_lock);
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+ alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+ false);
}
write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
read_lock(&bond->lock);
- alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+ alb_send_learning_packets(bond->curr_active_slave,
+ bond_dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
*/
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
__be32 dest_ip, __be32 src_ip,
- struct bond_vlan_tag *inner,
- struct bond_vlan_tag *outer)
+ struct bond_vlan_tag *tags)
{
struct sk_buff *skb;
+ int i;
pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
net_err_ratelimited("ARP packet allocation failed\n");
return;
}
- if (outer->vlan_id) {
- if (inner->vlan_id) {
- pr_debug("inner tag: proto %X vid %X\n",
- ntohs(inner->vlan_proto), inner->vlan_id);
- skb = __vlan_put_tag(skb, inner->vlan_proto,
- inner->vlan_id);
- if (!skb) {
- net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
- }
- }
- pr_debug("outer reg: proto %X vid %X\n",
- ntohs(outer->vlan_proto), outer->vlan_id);
- skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+ /* Go through all the tags backwards and add them to the packet */
+ for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+ if (!tags[i].vlan_id)
+ continue;
+
+ pr_debug("inner tag: proto %X vid %X\n",
+ ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+ skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+ tags[i].vlan_id);
+ if (!skb) {
+ net_err_ratelimited("failed to insert inner VLAN tag\n");
+ return;
+ }
+ }
+ /* Set the outer tag */
+ if (tags[0].vlan_id) {
+ pr_debug("outer tag: proto %X vid %X\n",
+ ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+ skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
arp_xmit(skb);
}
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int idx;
+
+ if (start_dev == end_dev)
+ return true;
+
+ netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+ if (bond_verify_device_path(upper, end_dev, tags)) {
+ if (is_vlan_dev(upper)) {
+ idx = vlan_get_encap_level(upper);
+ if (idx >= BOND_MAX_VLAN_ENCAP)
+ return false;
+
+ tags[idx].vlan_proto =
+ vlan_dev_vlan_proto(upper);
+ tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- struct net_device *upper, *vlan_upper;
- struct list_head *iter, *vlan_iter;
struct rtable *rt;
- struct bond_vlan_tag inner, outer;
+ struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
__be32 *targets = bond->params.arp_targets, addr;
int i;
+ bool ret;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
- inner.vlan_proto = 0;
- inner.vlan_id = 0;
- outer.vlan_proto = 0;
- outer.vlan_id = 0;
+ memset(tags, 0, sizeof(tags));
/* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ 0, tags);
continue;
}
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
goto found;
rcu_read_lock();
- /* first we search only for vlan devices. for every vlan
- * found we verify its upper dev list, searching for the
- * rt->dst.dev. If found we save the tag of the vlan and
- * proceed to send the packet.
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
- vlan_iter) {
- if (!is_vlan_dev(vlan_upper))
- continue;
-
- if (vlan_upper == rt->dst.dev) {
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
- iter) {
- if (upper == rt->dst.dev) {
- /* If the upper dev is a vlan dev too,
- * set the vlan tag to inner tag.
- */
- if (is_vlan_dev(upper)) {
- inner.vlan_proto = vlan_dev_vlan_proto(upper);
- inner.vlan_id = vlan_dev_vlan_id(upper);
- }
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- }
- }
-
- /* if the device we're looking for is not on top of any of
- * our upper vlans, then just search for any dev that
- * matches, and in case it's a vlan - save the id
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper == rt->dst.dev) {
- rcu_read_unlock();
- goto found;
- }
- }
+ ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
rcu_read_unlock();
+ if (ret)
+ goto found;
+
/* Not our device - skip */
pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, &inner, &outer);
+ addr, tags);
}
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
};
static const struct bond_opt_value bond_lacp_rate_tbl[] = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define BOND_MAX_VLAN_ENCAP 2
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
boards like am335x, dm814x, dm813x and dm811x.
-config CAN_C_CAN_STRICT_FRAME_ORDERING
- bool "Force a strict RX CAN frame order (may cause frame loss)"
- ---help---
- The RX split buffer prevents packet reordering but can cause packet
- loss. Only enable this option when you accept to lose CAN frames
- in favour of getting the received CAN frames in the correct order.
-
config CAN_C_CAN_PCI
tristate "Generic PCI Bus based C_CAN/D_CAN driver"
depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b5373..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
static inline void c_can_rx_object_get(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
- else
-#endif
c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}
static inline void c_can_rx_finalize(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- priv->rxmasked |= BIT(obj - 1);
- else if (obj == C_CAN_MSG_RX_LOW_LAST) {
- priv->rxmasked = 0;
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
- }
-#endif
if (priv->type != BOSCH_D_CAN)
c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- pend &= ~priv->rxmasked;
-#endif
return pend;
}
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
* has arrived. To work-around this issue, we keep two groups of message
* objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
*
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- * the INTPND bit.
- * - if the current message object number is equal to
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- * receive message objects.
- * - if the current message object number is greater than
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- * only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
* We clear the newdat bit right away.
*
* This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev;
+ struct net_device *dev, *prev_dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
writew(0x0, cfg_base + PITA_ICR + 2);
chan = NULL;
- for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
+ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
priv = netdev_priv(dev);
chan = priv->priv;
+ prev_dev = chan->prev_dev;
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
}
/* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
/* Loop over all registered devices */
while (1) {
+ struct net_device *prev_dev = chan->prev_dev;
+
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->prev_dev;
+ dev = prev_dev;
if (!dev) {
/* do that only for first channel */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+ tristate "Beckhoff CX5020 EtherCAT master support"
+ depends on PCI
+ ---help---
+ Driver for EtherCAT master module located on CCAT FPGA
+ that can be found on Beckhoff CX5020, and possibly other of CX
+ Beckhoff CX series industrial PCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ec_bhf.
+
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
/* Reset Rx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+ msgdma_csroffs(status));
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&rxcsr->status,
+ if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Rx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
/* Reset Tx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+ msgdma_csroffs(status));
+
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&txcsr->status,
+ if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Tx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
}
void msgdma_disable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_disable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
}
void msgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
}
/* return 0 to indicate transmit is pending */
int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
- iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
- iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
- iowrite32(0, &desc->write_addr_lo);
- iowrite32(0, &desc->write_addr_hi);
- iowrite32(buffer->len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
- iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+ csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_lo));
+ csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_hi));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+ csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+ msgdma_descroffs(stride));
+ csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+ msgdma_descroffs(control));
return 0;
}
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
u32 ready = 0;
u32 inuse;
u32 status;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
/* Get number of sent descriptors */
- inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+ inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+ & 0xffff;
if (inuse) { /* Tx FIFO is not empty */
ready = priv->tx_prod - priv->tx_cons - inuse - 1;
} else {
/* Check for buffered last packet */
- status = ioread32(&txcsr->status);
+ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
if (status & MSGDMA_CSR_STAT_BUSY)
ready = priv->tx_prod - priv->tx_cons - 1;
else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
void msgdma_add_rx_desc(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct msgdma_extended_desc *desc = priv->rx_dma_desc;
u32 len = priv->rx_dma_buf_sz;
dma_addr_t dma_addr = rxbuffer->dma_addr;
u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
| MSGDMA_DESC_CTL_TR_ERR_IRQ
| MSGDMA_DESC_CTL_GO);
- iowrite32(0, &desc->read_addr_lo);
- iowrite32(0, &desc->read_addr_hi);
- iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
- iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
- iowrite32(len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(0x00010001, &desc->stride);
- iowrite32(control, &desc->control);
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+ csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_lo));
+ csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_hi));
+ csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+ csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
}
/* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
u32 rxstatus = 0;
u32 pktlength;
u32 pktstatus;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
- struct msgdma_response *rxresp =
- (struct msgdma_response *)priv->rx_dma_resp;
-
- if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
- pktlength = ioread32(&rxresp->bytes_transferred);
- pktstatus = ioread32(&rxresp->status);
+
+ if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+ & 0xffff) {
+ pktlength = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(bytes_transferred));
+ pktstatus = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(status));
rxstatus = pktstatus;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
#ifndef __ALTERA_MSGDMAHW_H__
#define __ALTERA_MSGDMAHW_H__
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
- u32 read_addr; /* data buffer source address */
- u32 write_addr; /* data buffer destination address */
- u32 len; /* the number of bytes to transfer per descriptor */
- u32 control; /* characteristics of the transfer */
-};
-
/* mSGDMA extended descriptor format
*/
struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
u32 status;
};
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
/* mSGDMA response register bit definitions
*/
#define MSGDMA_RESP_EARLY_TERM BIT(8)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed);
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_async_read(struct altera_tse_private *priv);
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_txbusy(struct altera_tse_private *priv);
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
priv->rxdescphys = (dma_addr_t) 0;
priv->txdescphys = (dma_addr_t) 0;
- priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+ priv->rxdescphys = dma_map_single(priv->device,
+ (void __force *)priv->rx_dma_desc,
priv->rxdescmem, DMA_BIDIRECTIONAL);
if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
return -EINVAL;
}
- priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+ priv->txdescphys = dma_map_single(priv->device,
+ (void __force *)priv->tx_dma_desc,
priv->txdescmem, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
}
/* Initialize descriptor memory to all 0's, sync memory to cache */
- memset(priv->tx_dma_desc, 0, priv->txdescmem);
- memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
*/
void sgdma_reset(struct altera_tse_private *priv)
{
- u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
- u32 txdescriplen = priv->txdescmem;
- u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
- u32 rxdescriplen = priv->rxdescmem;
- struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
- struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
/* Initialize descriptor memory to 0 */
- memset(ptxdescripmem, 0, txdescriplen);
- memset(prxdescripmem, 0, rxdescriplen);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
- iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
- iowrite32(0, &ptxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
- iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
- iowrite32(0, &prxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}
/* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
/* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
*/
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- int pktstx = 0;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
/* wait 'til the tx sgdma is ready for the next transmit request */
if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
0, /* read fixed */
SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
- pktstx = sgdma_async_write(priv, cdesc);
+ sgdma_async_write(priv, cdesc);
/* enqueue the request to the pending transmit queue */
queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
u32 ready = 0;
- struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
if (!sgdma_txbusy(priv) &&
- ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+ ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+ & SGDMA_CONTROL_HW_OWNED) == 0) &&
(dequeue_tx(priv))) {
ready = 1;
}
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
*/
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
- struct sgdma_descrip *desc = NULL;
- int pktsrx;
- unsigned int rxstatus = 0;
- unsigned int pktlength = 0;
- unsigned int pktstatus = 0;
+ struct sgdma_descrip __iomem *base =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *desc = NULL;
struct tse_buffer *rxbuffer = NULL;
+ unsigned int rxstatus = 0;
- u32 sts = ioread32(&csr->status);
+ u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
desc = &base[0];
if (sts & SGDMA_STSREG_EOP) {
+ unsigned int pktlength = 0;
+ unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
priv->sgdmadesclen,
DMA_FROM_DEVICE);
- pktlength = desc->bytes_xferred;
- pktstatus = desc->status & 0x3f;
- rxstatus = pktstatus;
+ pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+ pktstatus = csrrd8(desc, sgdma_descroffs(status));
+ rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
if (rxstatus) {
- desc->status = 0;
+ csrwr8(0, desc, sgdma_descroffs(status));
rxbuffer = dequeue_rx(priv);
if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
"sgdma rx and rx queue empty!\n");
/* Clear control */
- iowrite32(0, &csr->control);
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
/* clear status */
- iowrite32(0xf, &csr->status);
+ csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
/* kick the rx sgdma after reaping this descriptor */
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
} else {
/* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
*/
netdev_err(priv->dev,
"SGDMA RX Error Info: %x, %x, %x\n",
- sts, desc->status, rxstatus);
+ sts, csrrd8(desc, sgdma_descroffs(status)),
+ rxstatus);
}
} else if (sts == 0) {
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
}
return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
/* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed)
{
/* Clear the next descriptor as not owned by hardware */
- u32 ctrl = ndesc->control;
+
+ u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
ctrl &= ~SGDMA_CONTROL_HW_OWNED;
- ndesc->control = ctrl;
+ csrwr8(ctrl, ndesc, sgdma_descroffs(control));
- ctrl = 0;
ctrl = SGDMA_CONTROL_HW_OWNED;
ctrl |= generate_eop;
ctrl |= rfixed;
ctrl |= wfixed;
/* Channel is implicitly zero, initialized to 0 by default */
-
- desc->raddr = raddr;
- desc->waddr = waddr;
- desc->next = lower_32_bits(ndesc_phys);
- desc->control = ctrl;
- desc->status = 0;
- desc->rburst = 0;
- desc->wburst = 0;
- desc->bytes = length;
- desc->bytes_xferred = 0;
+ csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+ csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+ csrwr32(0, desc, sgdma_descroffs(pad1));
+ csrwr32(0, desc, sgdma_descroffs(pad2));
+ csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+ csrwr8(ctrl, desc, sgdma_descroffs(control));
+ csrwr8(0, desc, sgdma_descroffs(status));
+ csrwr8(0, desc, sgdma_descroffs(wburst));
+ csrwr8(0, desc, sgdma_descroffs(rburst));
+ csrwr16(length, desc, sgdma_descroffs(bytes));
+ csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
/* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
*/
static int sgdma_async_read(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
priv->sgdmadesclen,
DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+ priv->rx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+ priv->rx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
}
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
if (sgdma_txbusy(priv))
return 0;
/* clear control and status */
- iowrite32(0, &csr->control);
- iowrite32(0x1f, &csr->status);
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->sgdmadesclen, DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+ priv->tx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+ priv->tx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->txdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->rxdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
*/
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+ return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY;
}
/* waits for the tx sgdma to finish it's current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
/* if DMA is busy, wait for current transactino to finish */
- while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+ while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
- if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+ if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) {
netdev_err(priv->dev, "timeout waiting for tx dma\n");
return 1;
}
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
/* SGDMA descriptor structure */
struct sgdma_descrip {
- unsigned int raddr; /* address of data to be read */
- unsigned int pad1;
- unsigned int waddr;
- unsigned int pad2;
- unsigned int next;
- unsigned int pad3;
- unsigned short bytes;
- unsigned char rburst;
- unsigned char wburst;
- unsigned short bytes_xferred; /* 16 bits, bytes xferred */
+ u32 raddr; /* address of data to be read */
+ u32 pad1;
+ u32 waddr;
+ u32 pad2;
+ u32 next;
+ u32 pad3;
+ u16 bytes;
+ u8 rburst;
+ u8 wburst;
+ u16 bytes_xferred; /* 16 bits, bytes xferred */
/* bit 0: error
* bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
* bit 6: reserved
* bit 7: status eop for recv case
*/
- unsigned char status;
+ u8 status;
/* bit 0: eop
* bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
* bits 3,4,5,6: Channel (always 0)
* bit 7: hardware owned
*/
- unsigned char control;
+ u8 control;
} __packed;
@@ -101,6 +101,8 @@ struct sgdma_csr {
u32 pad3[3];
};
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
#define SGDMA_STSREG_ERR BIT(0) /* Error */
#define SGDMA_STSREG_EOP BIT(1) /* EOP */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
u32 reserved5[42];
};
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
/* Transmit and Receive Command Registers Bit Definitions
*/
#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
*/
void altera_tse_set_ethtool_ops(struct net_device *);
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writeb(val, paddr);
+}
+
#endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 76133caffa78..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 *buf)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
u64 ext;
- buf[0] = ioread32(&mac->frames_transmitted_ok);
- buf[1] = ioread32(&mac->frames_received_ok);
- buf[2] = ioread32(&mac->frames_check_sequence_errors);
- buf[3] = ioread32(&mac->alignment_errors);
+ buf[0] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_transmitted_ok));
+ buf[1] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_received_ok));
+ buf[2] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_check_sequence_errors));
+ buf[3] = csrrd32(priv->mac_dev,
+ tse_csroffs(alignment_errors));
/* Extended aOctetsTransmittedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
- ext |= ioread32(&mac->octets_transmitted_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_transmitted_ok));
buf[4] = ext;
/* Extended aOctetsReceivedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
- ext |= ioread32(&mac->octets_received_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_received_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_received_ok));
buf[5] = ext;
- buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
- buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
- buf[8] = ioread32(&mac->if_in_errors);
- buf[9] = ioread32(&mac->if_out_errors);
- buf[10] = ioread32(&mac->if_in_ucast_pkts);
- buf[11] = ioread32(&mac->if_in_multicast_pkts);
- buf[12] = ioread32(&mac->if_in_broadcast_pkts);
- buf[13] = ioread32(&mac->if_out_discards);
- buf[14] = ioread32(&mac->if_out_ucast_pkts);
- buf[15] = ioread32(&mac->if_out_multicast_pkts);
- buf[16] = ioread32(&mac->if_out_broadcast_pkts);
- buf[17] = ioread32(&mac->ether_stats_drop_events);
+ buf[6] = csrrd32(priv->mac_dev,
+ tse_csroffs(tx_pause_mac_ctrl_frames));
+ buf[7] = csrrd32(priv->mac_dev,
+ tse_csroffs(rx_pause_mac_ctrl_frames));
+ buf[8] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_errors));
+ buf[9] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_errors));
+ buf[10] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_ucast_pkts));
+ buf[11] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_multicast_pkts));
+ buf[12] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_broadcast_pkts));
+ buf[13] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_discards));
+ buf[14] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_ucast_pkts));
+ buf[15] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_multicast_pkts));
+ buf[16] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_broadcast_pkts));
+ buf[17] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_drop_events));
/* Extended etherStatsOctets counter */
- ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
- ext |= ioread32(&mac->ether_stats_octets);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_ether_stats_octets)) << 32;
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_octets));
buf[18] = ext;
- buf[19] = ioread32(&mac->ether_stats_pkts);
- buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
- buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
- buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
- buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
- buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
- buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
- buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
- buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
- buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
- buf[29] = ioread32(&mac->ether_stats_jabbers);
- buf[30] = ioread32(&mac->ether_stats_fragments);
+ buf[19] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts));
+ buf[20] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_undersize_pkts));
+ buf[21] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_oversize_pkts));
+ buf[22] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_64_octets));
+ buf[23] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_65to127_octets));
+ buf[24] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_128to255_octets));
+ buf[25] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_256to511_octets));
+ buf[26] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_512to1023_octets));
+ buf[27] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1024to1518_octets));
+ buf[28] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1519tox_octets));
+ buf[29] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_jabbers));
+ buf[30] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_fragments));
}
static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
{
int i;
struct altera_tse_private *priv = netdev_priv(dev);
- u32 *tse_mac_regs = (u32 *)priv->mac_dev;
u32 *buf = regbuf;
/* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (i = 0; i < TSE_NUM_REGS; i++)
- buf[i] = ioread32(&tse_mac_regs[i]);
+ buf[i] = csrrd32(priv->mac_dev, i * 4);
}
static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
- u32 data;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* get the data */
- data = ioread32(&mdio_regs[regnum]) & 0xffff;
- return data;
+ return csrrd32(priv->mac_dev,
+ tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}
static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* write the data */
- iowrite32((u32) value, &mdio_regs[regnum]);
+ csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
return 0;
}
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
for (i = 0; i < PHY_MAX_ADDR; i++)
mdio->irq[i] = PHY_POLL;
- mdio->priv = priv->mac_dev;
+ mdio->priv = dev;
mdio->parent = priv->device;
ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
enum netdev_tx ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
- int txcomplete = 0;
spin_lock_bh(&priv->tx_lock);
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_sync_single_for_device(priv->device, buffer->dma_addr,
buffer->len, DMA_TO_DEVICE);
- txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+ priv->dmaops->tx_buffer(priv, buffer);
skb_tx_timestamp(skb);
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev = NULL;
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
- int ret;
if (priv->phy_addr != POLL_PHY) {
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
netdev_err(dev, "Could not attach to PHY\n");
} else {
+ int ret;
phydev = phy_find_first(priv->mdio);
if (phydev == NULL) {
netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
- struct altera_tse_mac *mac = priv->mac_dev;
u32 msb;
u32 lsb;
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
/* Set primary MAC address */
- iowrite32(msb, &mac->mac_addr_0);
- iowrite32(lsb, &mac->mac_addr_1);
+ csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+ csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
/* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
*/
static int reset_mac(struct altera_tse_private *priv)
{
- void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
int counter;
u32 dat;
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+ if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_SW_RESET))
break;
udelay(1);
}
if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~MAC_CMDCFG_SW_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
return -1;
}
return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
*/
static int init_mac(struct altera_tse_private *priv)
{
- struct altera_tse_mac *mac = priv->mac_dev;
unsigned int cmd = 0;
u32 frm_length;
/* Setup Rx FIFO */
- iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
- &mac->rx_section_empty);
- iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
- iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
- iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+ csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(rx_section_empty));
+
+ csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(rx_section_full));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(rx_almost_empty));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(rx_almost_full));
/* Setup Tx FIFO */
- iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
- &mac->tx_section_empty);
- iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
- iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
- iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+ csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(tx_section_empty));
+
+ csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(tx_section_full));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(tx_almost_empty));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(tx_almost_full));
/* MAC Address Configuration */
tse_update_mac_addr(priv, priv->dev->dev_addr);
/* MAC Function Configuration */
frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
- iowrite32(frm_length, &mac->frm_length);
- iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+ csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+ csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+ tse_csroffs(tx_ipg_length));
/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
* start address
*/
- tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
- tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
- ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+ tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+ ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+ tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+ ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+ ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
/* Set the MAC options */
- cmd = ioread32(&mac->command_config);
+ cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
cmd &= ~MAC_CMDCFG_ETH_SPEED;
cmd &= ~MAC_CMDCFG_ENA_10;
- iowrite32(cmd, &mac->command_config);
+ csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
- iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+ csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+ tse_csroffs(pause_quanta));
if (netif_msg_hw(priv))
dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
*/
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
- struct altera_tse_mac *mac = priv->mac_dev;
- u32 value = ioread32(&mac->command_config);
+ u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
if (enable)
value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
else
value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
- iowrite32(value, &mac->command_config);
+ csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}
/* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
struct netdev_hw_addr *ha;
/* clear the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(0, &(mac->hash_table[i]));
+ csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
netdev_for_each_mc_addr(ha, dev) {
unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
hash = (hash << 1) | xor_bit;
}
- iowrite32(1, &(mac->hash_table[hash]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
}
}
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
static void altera_tse_set_mcfilterall(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
/* set the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(1, &(mac->hash_table[i]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
/* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if (dev->flags & IFF_PROMISC)
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
if (dev->flags & IFF_ALLMULTI)
altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
static void tse_set_rx_mode(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
!netdev_mc_empty(dev) || !netdev_uc_empty(dev))
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
else
- tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
spin_unlock(&priv->mac_cfg_lock);
}
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
of_property_read_bool(pdev->dev.of_node,
"altr,has-hash-multicast-filter");
+ /* Disable the hash filter for now, until the
+ * multicast filter receive issue is debugged
+ */
+ priv->hash_filter = 0;
+
/* get supplemental address settings for this instance */
priv->added_unicast =
of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
return 0;
}
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
.altera_dtype = ALTERA_DTYPE_SGDMA,
.dmamask = 32,
.reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
.start_rxdma = sgdma_start_rxdma,
};
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
.altera_dtype = ALTERA_DTYPE_MSGDMA,
.dmamask = 64,
.reset_dma = msgdma_reset,
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
#include "altera_tse.h"
#include "altera_utils.h"
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value |= bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value &= ~bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 1 : 0;
}
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 0 : 1;
}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
#endif /* __ALTERA_UTILS_H__*/
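The hunks above convert every MMIO access in the TSE driver from dereferencing fields of a mapped register struct to base-plus-offset accessors. The csrrd32()/csrwr32() helpers and the tse_csroffs() macro are defined in altera_tse.h, which is not part of this diff; a minimal sketch of what such accessors plausibly look like, assuming the register layout is still described by struct altera_tse_mac:

	/* Hedged sketch -- the real definitions live in altera_tse.h. */
	#define tse_csroffs(a)	(offsetof(struct altera_tse_mac, a))

	static inline u32 csrrd32(void __iomem *mac, size_t offs)
	{
		return readl(mac + offs);	/* 32-bit read at base + offset */
	}

	static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
	{
		writel(val, mac + offs);	/* 32-bit write at base + offset */
	}

Keeping the offset explicit is what lets the hash-table loops above index with tse_csroffs(hash_table) + i * 4 instead of taking the address of an array element.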
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b260913db236..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
+ bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
* cleaning methods - might be redundant but harmless.
*/
if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
- bnx2x_prev_unload_undi_mf(bp);
+ if (need_write) {
+ bnx2x_prev_unload_undi_mf(bp);
+ need_write = false;
+ }
} else if (prev_undi) {
/* If UNDI resides in memory,
* manually increment it
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 81cc2d9831c2..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2695,7 +2695,7 @@ out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return 0;
+ return rc;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
+/*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
+ * These can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC 20000
+
+#define INFO_BLOCK_SIZE 0x10
+#define INFO_BLOCK_TYPE 0x0
+#define INFO_BLOCK_REV 0x2
+#define INFO_BLOCK_BLK_CNT 0x4
+#define INFO_BLOCK_TX_CHAN 0x4
+#define INFO_BLOCK_RX_CHAN 0x5
+#define INFO_BLOCK_OFFSET 0x8
+
+#define EC_MII_OFFSET 0x4
+#define EC_FIFO_OFFSET 0x8
+#define EC_MAC_OFFSET 0xc
+
+#define MAC_FRAME_ERR_CNT 0x0
+#define MAC_RX_ERR_CNT 0x1
+#define MAC_CRC_ERR_CNT 0x2
+#define MAC_LNK_LST_ERR_CNT 0x3
+#define MAC_TX_FRAME_CNT 0x10
+#define MAC_RX_FRAME_CNT 0x14
+#define MAC_TX_FIFO_LVL 0x20
+#define MAC_DROPPED_FRMS 0x28
+#define MAC_CONNECTED_CCAT_FLAG 0x78
+
+#define MII_MAC_ADDR 0x8
+#define MII_MAC_FILT_FLAG 0xe
+#define MII_LINK_STATUS 0xf
+
+#define FIFO_TX_REG 0x0
+#define FIFO_TX_RESET 0x8
+#define FIFO_RX_REG 0x10
+#define FIFO_RX_ADDR_VALID (1u << 31)
+#define FIFO_RX_RESET 0x18
+
+#define DMA_CHAN_OFFSET 0x1000
+#define DMA_CHAN_SIZE 0x8
+
+#define DMA_WINDOW_SIZE_MASK 0xfffffffc
+
+static const struct pci_device_id ids[] = {
+ { PCI_DEVICE(0x15ec, 0x5000), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK 0xffffffu
+#define RXHDR_NEXT_VALID (1u << 31)
+ __le32 next;
+#define RXHDR_NEXT_RECV_FLAG 0x1
+ __le32 recv;
+#define RXHDR_LEN_MASK 0xfffu
+ __le16 len;
+ __le16 port;
+ __le32 reserved;
+ u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE 0x7e8
+struct rx_desc {
+ struct rx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+ __le16 len;
+#define TX_HDR_PORT_0 0x1
+#define TX_HDR_PORT_1 0x2
+ u8 port;
+ u8 ts_enable;
+#define TX_HDR_SENT 0x1
+ __le32 sent;
+ u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+ struct tx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE 64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+ u8 *buf;
+ size_t len;
+ dma_addr_t buf_phys;
+
+ u8 *alloc;
+ size_t alloc_len;
+ dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+ struct net_device *net_dev;
+
+ struct pci_dev *dev;
+
+ void __iomem *io;
+ void __iomem *dma_io;
+
+ struct hrtimer hrtimer;
+
+ int tx_dma_chan;
+ int rx_dma_chan;
+ void __iomem *ec_io;
+ void __iomem *fifo_io;
+ void __iomem *mii_io;
+ void __iomem *mac_io;
+
+ struct bhf_dma rx_buf;
+ struct rx_desc *rx_descs;
+ int rx_dnext;
+ int rx_dcount;
+
+ struct bhf_dma tx_buf;
+ struct tx_desc *tx_descs;
+ int tx_dcount;
+ int tx_dnext;
+
+ u64 stat_rx_bytes;
+ u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID 0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ dev_dbg(dev, "Frame error counter: %d\n",
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+ dev_dbg(dev, "RX error counter: %d\n",
+ ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+ dev_dbg(dev, "CRC error counter: %d\n",
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+ dev_dbg(dev, "TX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+ dev_dbg(dev, "RX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+ dev_dbg(dev, "TX fifo level: %d\n",
+ ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+ dev_dbg(dev, "Dropped frames: %d\n",
+ ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+ dev_dbg(dev, "Connected with CCAT slot: %d\n",
+ ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+ dev_dbg(dev, "Link status: %d\n",
+ ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+ iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+ iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+ iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+ iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+ iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+ iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+ iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+ u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+ u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+ iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+ return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+ if (unlikely(netif_queue_stopped(priv->net_dev))) {
+ /* Make sure that we perceive changes to tx_dnext. */
+ smp_rmb();
+
+ if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+ netif_wake_queue(priv->net_dev);
+ }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+ return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+ iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+ priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+ struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ while (ec_bhf_pkt_received(desc)) {
+ int pkt_size = (le16_to_cpu(desc->header.len) &
+ RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+ u8 *data = desc->data;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+ dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, pkt_size), data, pkt_size);
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+ priv->stat_rx_bytes += pkt_size;
+
+ netif_rx(skb);
+ } else {
+ dev_err_ratelimited(dev,
+ "Couldn't allocate a skb_buff for a packet of size %u\n",
+ pkt_size);
+ }
+
+ desc->header.recv = 0;
+
+ ec_bhf_add_rx_desc(priv, desc);
+
+ priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+ desc = &priv->rx_descs[priv->rx_dnext];
+ }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+ struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+ hrtimer);
+ ec_bhf_process_rx(priv);
+ ec_bhf_process_tx(priv);
+
+ if (!netif_running(priv->net_dev))
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+ return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+ unsigned block_count, i;
+ void __iomem *ec_info;
+
+ dev_dbg(dev, "Info block:\n");
+ dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+ dev_dbg(dev, "Revision of function: %x\n",
+ (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+ block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+ dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+ for (i = 0; i < block_count; i++) {
+ u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+ INFO_BLOCK_TYPE);
+ if (type == ETHERCAT_MASTER_ID)
+ break;
+ }
+ if (i == block_count) {
+ dev_err(dev, "EtherCAT master with DMA block not found\n");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+ ec_info = priv->io + i * INFO_BLOCK_SIZE;
+ dev_dbg(dev, "EtherCAT master revision: %d\n",
+ ioread16(ec_info + INFO_BLOCK_REV));
+
+ priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+ dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+ priv->tx_dma_chan);
+
+ priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+ dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+ priv->rx_dma_chan);
+
+ priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+ priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+ priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+ priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+ dev_dbg(dev,
+ "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+ priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+ return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct tx_desc *desc;
+ unsigned len;
+
+ dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+ desc = &priv->tx_descs[priv->tx_dnext];
+
+ skb_copy_and_csum_dev(skb, desc->data);
+ len = skb->len;
+
+ memset(&desc->header, 0, sizeof(desc->header));
+ desc->header.len = cpu_to_le16(len);
+ desc->header.port = TX_HDR_PORT_0;
+
+ ec_bhf_send_packet(priv, desc);
+
+ priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+ if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+ /* Make sure that updates to tx_dnext are perceived
+ * by the timer routine.
+ */
+ smp_wmb();
+
+ netif_stop_queue(net_dev);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+ ec_bhf_print_status(priv);
+ }
+
+ priv->stat_tx_bytes += len;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
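The stop/wake handshake between ec_bhf_start_xmit() and ec_bhf_process_tx() depends on a barrier pair: the xmit path publishes the new tx_dnext before stopping the queue, and the timer path observes the stopped queue before re-reading tx_dnext. A minimal sketch of the pattern (desc_sent() stands in for ec_bhf_desc_sent(); names are illustrative):

	/* Producer (xmit path): publish the ring index, then stop the queue. */
	priv->tx_dnext = next;
	smp_wmb();			/* order the index update before the stop */
	netif_stop_queue(ndev);

	/* Consumer (timer path): see the stop, then re-read the index. */
	if (netif_queue_stopped(ndev)) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		if (desc_sent(&priv->tx_descs[priv->tx_dnext]))
			netif_wake_queue(ndev);
	}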
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+ struct bhf_dma *buf,
+ int channel,
+ int size)
+{
+ int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+ struct device *dev = PRIV_TO_DEV(priv);
+ u32 mask;
+
+ iowrite32(0xffffffff, priv->dma_io + offset);
+
+ mask = ioread32(priv->dma_io + offset);
+ mask &= DMA_WINDOW_SIZE_MASK;
+ dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+ /* We want to allocate a chunk of memory that is:
+ * - aligned to the mask we just read
+ * - at most the DMA window size (~mask + 1 bytes) long
+ * To guarantee that a properly aligned window fits inside the
+ * allocation, we allocate a buffer of twice that length.
+ */
+ buf->len = min_t(int, ~mask + 1, size);
+ buf->alloc_len = 2 * buf->len;
+
+ dev_dbg(dev, "Allocating %d bytes for channel %d",
+ (int)buf->alloc_len, channel);
+ buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+ GFP_KERNEL);
+ if (buf->alloc == NULL) {
+ dev_info(dev, "Failed to allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+ buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+ iowrite32(0, priv->dma_io + offset + 4);
+ iowrite32(buf->buf_phys, priv->dma_io + offset);
+ dev_dbg(dev, "Buffer: %x and read from dev: %x",
+ (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+ return 0;
+}
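The over-allocation trick above is easiest to see with concrete numbers. A worked example, assuming the device reports a 16 KiB DMA window (all values illustrative):

	u32 mask = 0xffffc000;			/* read back from the device  */
	size_t len = ~mask + 1;			/* 0x4000 = 16 KiB window     */
	size_t alloc_len = 2 * len;		/* 0x8000, guarantees a fit   */
	dma_addr_t alloc_phys = 0x12345678;	/* from dma_alloc_coherent()  */

	/* The first mask-aligned address at or below alloc_phys + len still
	 * leaves a full window before the end of the 2 * len allocation:
	 * (0x12345678 + 0x4000) & 0xffffc000 == 0x12348000, and
	 * 0x12348000 + 0x4000 <= 0x12345678 + 0x8000.
	 */
	dma_addr_t buf_phys = (alloc_phys + len) & mask;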
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+ int i = 0;
+
+ priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+ priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+ priv->tx_dnext = 0;
+
+ for (i = 0; i < priv->tx_dcount; i++)
+ priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+ int i;
+
+ priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+ priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+ priv->rx_dnext = 0;
+
+ for (i = 0; i < priv->rx_dcount; i++) {
+ struct rx_desc *desc = &priv->rx_descs[i];
+ u32 next;
+
+ if (i != priv->rx_dcount - 1)
+ next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+ else
+ next = 0;
+ next |= RXHDR_NEXT_VALID;
+ desc->header.next = cpu_to_le32(next);
+ desc->header.recv = 0;
+ ec_bhf_add_rx_desc(priv, desc);
+ }
+}
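ec_bhf_setup_rx_descs() links the ring by buffer-relative offsets rather than pointers: each header's next field holds the offset of the following descriptor inside the DMA buffer, tagged with RXHDR_NEXT_VALID, and the last descriptor wraps back to offset 0. With sizeof(struct rx_desc) == 0x800 (a 0x18-byte header plus 0x7e8 bytes of payload), a sketch of the resulting encoding (illustrative, mirrors the loop above):

	/* A 3-descriptor ring is chained as
	 *   desc[0].header.next = 0x0800 | RXHDR_NEXT_VALID
	 *   desc[1].header.next = 0x1000 | RXHDR_NEXT_VALID
	 *   desc[2].header.next = 0x0000 | RXHDR_NEXT_VALID  (wrap)
	 */
	for (i = 0; i < count; i++) {
		u32 next = (i == count - 1) ? 0 : (i + 1) * sizeof(struct rx_desc);

		descs[i].header.next = cpu_to_le32(next | RXHDR_NEXT_VALID);
	}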
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+ int err = 0;
+
+ dev_info(dev, "Opening device\n");
+
+ ec_bhf_reset(priv);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+ FIFO_SIZE * sizeof(struct rx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate rx buffer\n");
+ goto out;
+ }
+ ec_bhf_setup_rx_descs(priv);
+
+ dev_info(dev, "RX buffer allocated, address: %x\n",
+ (unsigned)priv->rx_buf.buf_phys);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+ FIFO_SIZE * sizeof(struct tx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate tx buffer\n");
+ goto error_rx_free;
+ }
+ dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+ (unsigned)priv->tx_buf.buf_phys);
+
+ iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+ ec_bhf_setup_tx_descs(priv);
+
+ netif_start_queue(net_dev);
+
+ hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+ HRTIMER_MODE_REL);
+
+ dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+ ec_bhf_print_status(priv);
+
+ return 0;
+
+error_rx_free:
+ dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+ priv->rx_buf.alloc_phys);
+out:
+ return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ hrtimer_cancel(&priv->hrtimer);
+
+ ec_bhf_reset(priv);
+
+ netif_tx_disable(net_dev);
+
+ dma_free_coherent(dev, priv->tx_buf.alloc_len,
+ priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+ dma_free_coherent(dev, priv->rx_buf.alloc_len,
+ priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+ stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+ stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+ stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+ stats->tx_bytes = priv->stat_tx_bytes;
+ stats->rx_bytes = priv->stat_rx_bytes;
+
+ return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+ .ndo_start_xmit = ec_bhf_start_xmit,
+ .ndo_open = ec_bhf_open,
+ .ndo_stop = ec_bhf_stop,
+ .ndo_get_stats64 = ec_bhf_get_stats,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct net_device *net_dev;
+ struct ec_bhf_priv *priv;
+ void __iomem *dma_io;
+ void __iomem *io;
+ int err = 0;
+
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+
+ pci_set_master(dev);
+
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ err = -EIO;
+ goto err_disable_dev;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ goto err_disable_dev;
+ }
+
+ err = pci_request_regions(dev, "ec_bhf");
+ if (err) {
+ dev_err(&dev->dev, "Failed to request pci memory regions\n");
+ goto err_disable_dev;
+ }
+
+ io = pci_iomap(dev, 0, 0);
+ if (!io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+ err = -EIO;
+ goto err_release_regions;
+ }
+
+ dma_io = pci_iomap(dev, 2, 0);
+ if (!dma_io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+ err = -EIO;
+ goto err_unmap;
+ }
+
+ net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+ if (!net_dev) {
+ err = -ENOMEM;
+ goto err_unmap_dma_io;
+ }
+
+ pci_set_drvdata(dev, net_dev);
+ SET_NETDEV_DEV(net_dev, &dev->dev);
+
+ net_dev->features = 0;
+ net_dev->flags |= IFF_NOARP;
+
+ net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ priv->io = io;
+ priv->dma_io = dma_io;
+ priv->dev = dev;
+
+ err = ec_bhf_setup_offsets(priv);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+ dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+ net_dev->dev_addr);
+
+ err = register_netdev(net_dev);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ return 0;
+
+err_free_net_dev:
+ free_netdev(net_dev);
+err_unmap_dma_io:
+ pci_iounmap(dev, dma_io);
+err_unmap:
+ pci_iounmap(dev, io);
+err_release_regions:
+ pci_release_regions(dev);
+err_disable_dev:
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+
+ return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(dev);
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+
+ pci_iounmap(dev, priv->dma_io);
+ pci_iounmap(dev, priv->io);
+ pci_release_regions(dev);
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+ .name = "ec_bhf",
+ .id_table = ids,
+ .probe = ec_bhf_probe,
+ .remove = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+ return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+ pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ /* On some BE3 FW versions, after a HW reset,
+ * interrupts will remain disabled for each function.
+ * So, explicitly enable interrupts
+ */
+ be_intr_set(adapter, true);
+
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
return idx;
}
-static void
+static int
jme_fill_tx_map(struct pci_dev *pdev,
struct txdesc *txdesc,
struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+ return -EINVAL;
+
pci_dma_sync_single_for_device(pdev,
dmaaddr,
len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
txbi->mapping = dmaaddr;
txbi->len = len;
+ return 0;
}
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ int mask = jme->tx_ring_mask;
+ int j;
+
+ for (j = 0 ; j < count ; j++) {
+ ctxbi = txbi + ((startidx + j + 2) & (mask));
+ pci_unmap_page(jme->pdev,
+ ctxbi->mapping,
+ ctxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
+ }
+
+}
+
+static int
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
u32 len;
+ int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
frag = &skb_shinfo(skb)->frags[i];
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma);
+ if (ret) {
+ jme_drop_tx_map(jme, idx, i);
+ goto out;
+ }
+
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
ctxdesc = txdesc + ((idx + 1) & (mask));
ctxbi = txbi + ((idx + 1) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma);
+ if (ret)
+ jme_drop_tx_map(jme, idx, i);
+
+out:
+ return ret;
}
+
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct txdesc *txdesc;
struct jme_buffer_info *txbi;
u8 flags;
+ int ret = 0;
txdesc = (struct txdesc *)txring->desc + idx;
txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
jme_tx_csum(jme, skb, &flags);
jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
- jme_map_tx_skb(jme, skb, idx);
+ ret = jme_map_tx_skb(jme, skb, idx);
+ if (ret)
+ return ret;
+
txdesc->desc1.flags = flags;
/*
* Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- jme_fill_tx_desc(jme, skb, idx);
+ if (jme_fill_tx_desc(jme, skb, idx))
+ return NETDEV_TX_OK;
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
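The jme changes above follow the standard rule for scatter-gather TX paths: every DMA mapping must be checked with a mapping-error test, and on failure everything mapped so far must be unwound before the packet is dropped. A condensed sketch of the pattern (map_all_frags() and its unwind label are illustrative stand-ins, not driver API):

	static int map_all_frags(struct device *dev, struct sk_buff *skb,
				 dma_addr_t *addrs)
	{
		int i, nr = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < nr; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			addrs[i] = skb_frag_dma_map(dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addrs[i]))
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)	/* release every mapping made so far */
			dma_unmap_page(dev, addrs[i],
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       DMA_TO_DEVICE);
		return -ENOMEM;
	}

On failure the caller returns NETDEV_TX_OK after dropping the skb, exactly as jme_start_xmit() now does, since retrying a mapping that just failed would only busy-loop the queue.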
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
},
{
.opcode = MLX4_CMD_UPDATE_QP,
- .has_inbox = false,
+ .has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_CMD_EPERM_wrapper
+ .wrapper = mlx4_UPDATE_QP_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ enum mlx4_update_qp_attr attr,
+ struct mlx4_update_qp_params *params)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_update_qp_context *cmd;
+ u64 pri_addr_path_mask = 0;
+ int err = 0;
+
+ /* Validate the attributes before allocating the mailbox so an
+ * invalid request cannot leak it.
+ */
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+ if (attr & MLX4_UPDATE_QP_SMAC) {
+ pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+ cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+ }
+
+ cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+ err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd_info)
+{
+ int err;
+ u32 qpn = vhcr->in_modifier & 0xffffff;
+ struct res_qp *rqp;
+ u64 mac;
+ unsigned port;
+ u64 pri_addr_path_mask;
+ struct mlx4_update_qp_context *cmd;
+ int smac_index;
+
+ cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+ pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+ if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+ (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+ return -EPERM;
+
+ /* Just change the smac for the QP */
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err) {
+ mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+ return err;
+ }
+
+ port = (rqp->sched_queue >> 6 & 1) + 1;
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma,
+ vhcr->in_modifier, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n",
+ qpn);
+
+err_mac:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
-{
- int err;
-
- netdev->num_tx_queues = adapter->drv_tx_rings;
- netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
- err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
- if (err)
- netdev_err(netdev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
-
- return err;
-}
-
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bc914859e38..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
- err = qlcnic_set_real_num_queues(adapter, netdev);
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
if (err)
return err;
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
tx_ring->tx_stats.xmit_called,
tx_ring->tx_stats.xmit_on,
tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
netdev_info(netdev,
- "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
readl(tx_ring->crb_cmd_producer),
tx_ring->producer, tx_ring->sw_consumer,
le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ u8 tx_rings, rx_rings;
int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
return err;
}
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change the Tx/Rx ring sizes
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
efx->net_dev->rx_cpu_rmap = NULL;
#endif
- /* Disable MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
- /* Disable legacy interrupt */
- if (efx->legacy_irq)
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
+ }
}
/* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
__func__, ret);
- goto phy_error;
+ return ret;
}
}
@@ -1779,8 +1779,6 @@ init_error:
dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
-phy_error:
- clk_disable_unprepare(priv->stmmac_clk);
return ret;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
int i;
for (i = 0; i < N_TX_RINGS; i++)
- spin_lock(&cp->tx_lock[i]);
+ spin_lock_nested(&cp->tx_lock[i], i);
}
static inline void cas_lock_all(struct cas *cp)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
-
- if (strncmp(mdio->name, "gpio", 4) == 0) {
- /* GPIO bitbang MDIO driver attached */
- struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, bus->id, phyid);
- } else {
- /* davinci MDIO driver attached */
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+ of_node_put(mdio_node);
+ if (!mdio) {
+ pr_err("Missing mdio platform device\n");
+ return -EINVAL;
}
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ }
}
static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
static void macvlan_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
static void macvlan_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key,
+ macvlan_get_nest_level(dev));
netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
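The lockdep change above gives each level of a macvlan stack its own subclass for addr_list_lock: a macvlan on top of another macvlan gets nest_level 2, so taking its lock while the lower device's lock is held appears as two distinct (class, subclass) pairs rather than a false recursive-lock report. A sketch of the locking this enables (illustrative; netif_addr_lock_nested() is assumed to consult ndo_get_lock_subclass, as it does in this series):

	netif_addr_lock_bh(lowerdev);		/* subclass 0: real device  */
	netif_addr_lock_nested(macvlan_dev);	/* subclass from nest_level */
	/* ... propagate unicast/multicast lists down the stack ... */
	netif_addr_unlock(macvlan_dev);
	netif_addr_unlock_bh(lowerdev);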
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
+ .ndo_get_lock_subclass = macvlan_get_nest_level,
};
void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+ if (bus_id < 0) {
+ dev_warn(&pdev->dev, "failed to get alias id\n");
+ bus_id = 0;
+ }
} else {
pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0, do_suspend = 0;
+ bool needs_aneg = false, do_suspend = false, do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
case PHY_PENDING:
break;
case PHY_UP:
- needs_aneg = 1;
+ needs_aneg = true;
phydev->link_timeout = PHY_AN_TIMEOUT;
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
} else if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
break;
case PHY_NOLINK:
err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
netif_carrier_on(phydev->attached_dev);
} else {
if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
}
phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
phydev->link = 0;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
- do_suspend = 1;
+ do_suspend = true;
}
break;
case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
}
phydev->adjust_link(phydev->attached_dev);
}
+ do_resume = true;
break;
}
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
-
- if (do_suspend)
+ else if (do_suspend)
phy_suspend(phydev);
+ else if (do_resume)
+ phy_resume(phydev);
if (err < 0)
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
-
- phy_resume(phydev);
+ else
+ phy_resume(phydev);
return err;
}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
cdc_ncm_unbind(dev, intf);
}
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ return true;
+ }
+ return false;
+}
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
struct cdc_ncm_ctx *ctx = info->ctx;
__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
u16 tci = 0;
+ bool is_ip;
u8 *c;
if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
if (skb->len <= ETH_HLEN)
goto error;
+ /* Some applications using e.g. packet sockets will
+ * bypass the VLAN acceleration and create tagged
+ * ethernet frames directly. We primarily look for
+ * the accelerated out-of-band tag, but fall back if
+ * required
+ */
+ skb_reset_mac_header(skb);
+ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+ __vlan_get_tag(skb, &tci) == 0) {
+ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ skb_pull(skb, VLAN_ETH_HLEN);
+ } else {
+ is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+ skb_pull(skb, ETH_HLEN);
+ }
+
/* mapping VLANs to MBIM sessions:
* no tag => IPS session <0>
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
* 512 - 4095 => unsupported, drop
*/
- vlan_get_tag(skb, &tci);
-
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
- /* verify that datagram is IPv4 or IPv6 */
- skb_reset_mac_header(skb);
- switch (eth_hdr(skb)->h_proto) {
- case htons(ETH_P_IP):
- case htons(ETH_P_IPV6):
- break;
- default:
+ if (!is_ip)
goto error;
- }
c = (u8 *)&sign;
c[3] = tci;
break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
"unsupported tci=0x%04x\n", tci);
goto error;
}
- skb_pull(skb, ETH_HLEN);
}
spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
return;
/* need to send the NA on the VLAN dev, if any */
- if (tci)
+ rcu_read_lock();
+ if (tci) {
netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
tci);
- else
+ if (!netdev) {
+ rcu_read_unlock();
+ return;
+ }
+ } else {
netdev = dev->net;
- if (!netdev)
- return;
+ }
+ dev_hold(netdev);
+ rcu_read_unlock();
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
- return;
+ goto out;
is_router = !!in6_dev->cnf.forwarding;
in6_dev_put(in6_dev);
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
true /* solicited */,
false /* override */,
true /* inc_opt */);
+out:
+ dev_put(netdev);
}
static bool is_neigh_solicit(u8 *buf, size_t len)
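The cdc_mbim rework above keeps the VLAN-to-session mapping intact while moving the IP check ahead of the header pull. A self-contained sketch of just the tci-to-NDP-signature mapping (tci_to_sign() is an illustrative helper, not driver API; the signature constants come from linux/usb/cdc.h):

	static bool tci_to_sign(u16 tci, __le32 *sign)
	{
		u8 *c = (u8 *)sign;

		switch (tci & 0x0f00) {
		case 0x0000:	/* VLAN 0-255: IPS session <tci> */
			*sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
			c[3] = (u8)tci;
			return true;
		case 0x0100:	/* VLAN 256-511: DSS session <tci - 256> */
			*sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c[3] = (u8)tci;	/* low byte equals tci - 256 here */
			return true;
		default:	/* VLAN 512-4095: unsupported, drop */
			return false;
		}
	}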
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
- bss_conf->enable_beacon)
+ bss_conf->enable_beacon) {
priv->reconfig_beacon = true;
+ priv->rearm_ani = true;
+ }
if (bss_conf->assoc) {
priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_stop_ani(priv);
del_timer_sync(&priv->tx.cleanup_timer);
ath9k_htc_tx_drain(priv);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
if (!err) {
/* only set 2G bandwidth using bw_cap command */
band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
- band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
sizeof(band_bwcap));
} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) {
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
}
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
* this number of packets were received (typically 1)
* @passive2active: is auto switching from passive to active during scan allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
- * bits 0-19: beacon interal in usecs (suspend before executing)
+ * bits 0-19: beacon interval in TUs (suspend before executing)
* bits 20-23: reserved
* bits 24-31: number of beacons (suspend between channels)
* @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
* @quiet_plcp_th: quiet channel num of packets threshold
* @good_CRC_th: passive to active promotion threshold
* @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of assoceated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of the associated channel
+ * @suspend_time: pause scan this many TUs when returning to service channel
* @flags: RXON flags
* @filter_flags: RXONfilter
* @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
return;
- ieee80211_iterate_active_interfaces(
+ ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_mc_iface_iterator, &iter_data);
}
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_is_idle(mvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
switch (mvm->scan_status) {
case IWL_MVM_SCAN_OS:
IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
return mvmvif->low_latency;
}
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
return;
}
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate */
if (lq_sta->dbg_fixed_rate) {
IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_bound);
- /*
- * Under low latency traffic passive scan is fragmented meaning
- * that dwell on a particular channel will be fragmented. Each fragment
- * dwell time is 20ms and fragments period is 105ms. Skipping to next
- * channel will be delayed by the same period - 105ms. So suspend_time
- * parameter describing both fragments and channels skipping periods is
- * set to 105ms. This value is chosen so that overall passive scan
- * duration will not be too long. Max_out_time in this case is set to
- * 70ms, so for active scanning operating channel will be left for 70ms
- * while for passive still for 20ms (fragment dwell).
- */
- if (global_bound) {
- if (!iwl_mvm_low_latency(mvm)) {
- params->suspend_time = ieee80211_tu_to_usec(100);
- params->max_out_time = ieee80211_tu_to_usec(600);
- } else {
- params->suspend_time = ieee80211_tu_to_usec(105);
- /* P2P doesn't support fragmented passive scan, so
- * configure max_out_time to be at least longest dwell
- * time for passive scan.
- */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
- params->max_out_time = ieee80211_tu_to_usec(70);
- params->passive_fragmented = true;
- } else {
- u32 passive_dwell;
- /*
- * Use band G so that passive channel dwell time
- * will be assigned with maximum value.
- */
- band = IEEE80211_BAND_2GHZ;
- passive_dwell = iwl_mvm_get_passive_dwell(band);
- params->max_out_time =
- ieee80211_tu_to_usec(passive_dwell);
- }
- }
+ if (!global_bound)
+ goto not_bound;
+
+ params->suspend_time = 100;
+ params->max_out_time = 600;
+
+ if (iwl_mvm_low_latency(mvm)) {
+ params->suspend_time = 250;
+ params->max_out_time = 250;
}
+not_bound:
+
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- if (params->passive_fragmented)
- params->dwell[band].passive = 20;
- else
- params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
+ params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band,
n_ssids);
}
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
- int tail = band_2ghz + band_5ghz;
+ int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
return result;
}
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *idle = _data;
+
+ if (!vif->bss_conf.idle)
+ *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+ bool idle = true;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_idle_iter, &idle);
+
+ return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- trans->dev = &pdev->dev;
- trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_pci_disable_msi;
}
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_free_ict;
}
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
return trans;
out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
- int more_to_do = 0;
- unsigned long flags;
-
- /* It is necessary to disable IRQ before calling
- * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
- * lose event from the frontend.
- *
- * Consider:
- * RING_HAS_UNCONSUMED_REQUESTS
- * <frontend generates event to trigger napi_schedule>
- * __napi_complete
- *
- * This handler is still in scheduled state so the
- * event has no effect at all. After __napi_complete
- * this handler is descheduled and cannot get
- * scheduled again. We lose event in this case and the ring
- * will be completely stalled.
- */
-
- local_irq_save(flags);
-
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
- if (!more_to_do)
- __napi_complete(napi);
-
- local_irq_restore(flags);
+ napi_complete(napi);
+ xenvif_napi_schedule_or_enable_events(vif);
}
return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_down(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page were found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+ const int i,
+ const struct ubuf_info *ubuf)
+{
+ struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+
+ if (skb_shinfo(skb)->frags[i].page.p ==
+ foreign_vif->mmap_pages[pending_idx])
+ break;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+
+ return ubuf;
+}
+
+/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
- grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
- struct xenvif *foreign_vif = NULL;
+ const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ const struct ubuf_info *const head_ubuf = ubuf;
old_meta_prod = npo->meta_prod;
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
- if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
- (ubuf->callback == &xenvif_zerocopy_callback)) {
- int i = 0;
- foreign_vif = ubuf_to_vif(ubuf);
-
- do {
- u16 pending_idx = ubuf->desc;
- foreign_grefs[i++] =
- foreign_vif->pending_tx_info[pending_idx].req.gref;
- ubuf = (struct ubuf_info *) ubuf->ctx;
- } while (ubuf);
- }
-
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
for (i = 0; i < nr_frags; i++) {
+ /* This variable also signals whether foreign_gref has a real
+ * value or not.
+ */
+ struct xenvif *foreign_vif = NULL;
+ grant_ref_t foreign_gref;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ const struct ubuf_info *const startpoint = ubuf;
+
+ /* Ideally ubuf points to the chain element which
+ * belongs to this frag. Or if frags were removed from
+ * the beginning, then shortly before it.
+ */
+ ubuf = xenvif_find_gref(skb, i, ubuf);
+
+ /* Try again from the beginning of the list, if we
+ * haven't tried from there. This only makes sense in
+ * the unlikely event of reordering the original frags.
+ * For injected local pages it's an unnecessary second
+ * run.
+ */
+ if (unlikely(!ubuf) && startpoint != head_ubuf)
+ ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+ if (likely(ubuf)) {
+ u16 pending_idx = ubuf->desc;
+
+ foreign_vif = ubuf_to_vif(ubuf);
+ foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ /* Just a safety measure. If this was the last
+ * element on the list, the for loop will
+ * iterate again if a local page were added to
+ * the end. Using head_ubuf here prevents the
+ * second search on the chain. Or the original
+ * frags changed order, but that's less likely.
+ * In any way, ubuf shouldn't be NULL.
+ */
+ ubuf = ubuf->ctx ?
+ (struct ubuf_info *) ubuf->ctx :
+ head_ubuf;
+ } else
+ /* This frag was a local page, added to the
+ * array after the skb left netback.
+ */
+ ubuf = head_ubuf;
+ }
xenvif_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
foreign_vif,
- foreign_grefs[i]);
+ foreign_vif ? foreign_gref : UINT_MAX);
}
return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
notify_remote_via_irq(vif->rx_irq);
}
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
int more_to_do;
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_tx_err(struct xenvif *vif,
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index bd47fbc53dc9..e8376d646d98 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -206,12 +206,13 @@ static struct platform_device *of_platform_device_create_pdata(
{
struct platform_device *dev;
- if (!of_device_is_available(np))
+ if (!of_device_is_available(np) ||
+ of_node_test_and_set_flag(np, OF_POPULATED))
return NULL;
dev = of_device_alloc(np, bus_id, parent);
if (!dev)
- return NULL;
+ goto err_clear_flag;
#if defined(CONFIG_MICROBLAZE)
dev->archdata.dma_mask = 0xffffffffUL;
@@ -229,10 +230,14 @@ static struct platform_device *of_platform_device_create_pdata(
if (of_device_add(dev) != 0) {
platform_device_put(dev);
- return NULL;
+ goto err_clear_flag;
}
return dev;
+
+err_clear_flag:
+ of_node_clear_flag(np, OF_POPULATED);
+ return NULL;
}
/**
@@ -264,14 +269,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
pr_debug("Creating amba device %s\n", node->full_name);
- if (!of_device_is_available(node))
+ if (!of_device_is_available(node) ||
+ of_node_test_and_set_flag(node, OF_POPULATED))
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
if (!dev) {
pr_err("%s(): amba_device_alloc() failed for %s\n",
__func__, node->full_name);
- return NULL;
+ goto err_clear_flag;
}
/* setup generic device info */
@@ -311,6 +317,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
err_free:
amba_device_put(dev);
+err_clear_flag:
+ of_node_clear_flag(node, OF_POPULATED);
return NULL;
}
#else /* CONFIG_ARM_AMBA */
@@ -487,4 +495,60 @@ int of_platform_populate(struct device_node *root,
return rc;
}
EXPORT_SYMBOL_GPL(of_platform_populate);
+
+static int of_platform_device_destroy(struct device *dev, void *data)
+{
+ bool *children_left = data;
+
+ /* Do not touch devices not populated from the device tree */
+ if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) {
+ *children_left = true;
+ return 0;
+ }
+
+ /* Recurse, but don't touch this device if it has any children left */
+ if (of_platform_depopulate(dev) != 0) {
+ *children_left = true;
+ return 0;
+ }
+
+ if (dev->bus == &platform_bus_type)
+ platform_device_unregister(to_platform_device(dev));
+#ifdef CONFIG_ARM_AMBA
+ else if (dev->bus == &amba_bustype)
+ amba_device_unregister(to_amba_device(dev));
+#endif
+ else {
+ *children_left = true;
+ return 0;
+ }
+
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
+/**
+ * of_platform_depopulate() - Remove devices populated from device tree
+ * @parent: device which childred will be removed
+ *
+ * Complementary to of_platform_populate(), this function removes children
+ * of the given device (and, recurrently, their children) that have been
+ * created from their respective device tree nodes (and only those,
+ * leaving others - eg. manually created - unharmed).
+ *
+ * Returns 0 when all children devices have been removed or
+ * -EBUSY when some children remained.
+ */
+int of_platform_depopulate(struct device *parent)
+{
+ bool children_left = false;
+
+ device_for_each_child(parent, &children_left,
+ of_platform_device_destroy);
+
+ return children_left ? -EBUSY : 0;
+}
+EXPORT_SYMBOL_GPL(of_platform_depopulate);
+
#endif /* CONFIG_OF_ADDRESS */
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 7f8b78c08879..8c148f39e8d7 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -148,7 +148,7 @@ static noinline void pci_wait_cfg(struct pci_dev *dev)
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = 0; \
+ int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
if (PCI_##size##_BAD) \
return -EINVAL; \
@@ -159,9 +159,7 @@ int pci_user_read_config_##size \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
- if (ret > 0) \
- ret = -EINVAL; \
- return ret; \
+ return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -170,7 +168,7 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = -EIO; \
+ int ret = PCIBIOS_SUCCESSFUL; \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
@@ -179,9 +177,7 @@ int pci_user_write_config_##size \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
- if (ret > 0) \
- ret = -EINVAL; \
- return ret; \
+ return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index fb8aed307c28..447d393725e1 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include "pci.h"
@@ -236,7 +235,7 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
*
* This adds add sysfs entries and start device drivers
*/
-int pci_bus_add_device(struct pci_dev *dev)
+void pci_bus_add_device(struct pci_dev *dev)
{
int retval;
@@ -253,8 +252,6 @@ int pci_bus_add_device(struct pci_dev *dev)
WARN_ON(retval < 0);
dev->is_added = 1;
-
- return 0;
}
/**
@@ -267,16 +264,12 @@ void pci_bus_add_devices(const struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child;
- int retval;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip already-added devices */
if (dev->is_added)
continue;
- retval = pci_bus_add_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error adding device (%d)\n",
- retval);
+ pci_bus_add_device(dev);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 47aaf22d814e..0e5f3c95af5b 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -3,7 +3,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index a6f67ec8882f..21df477be0c8 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -33,4 +33,17 @@ config PCI_RCAR_GEN2
There are 3 internal PCI controllers available with a single
built-in EHCI/OHCI host controller present on each one.
+config PCI_RCAR_GEN2_PCIE
+ bool "Renesas R-Car PCIe controller"
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ help
+ Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+
+config PCI_HOST_GENERIC
+ bool "Generic PCI host controller"
+ depends on ARM && OF
+ help
+ Say Y here if you want to support a simple generic PCI host
+ controller, such as the one emulated by kvmtool.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 13fb3333aa05..611ba4b48c94 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -4,3 +4,5 @@ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 3de6bfbbe8e9..1632661c5b7f 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -415,9 +415,7 @@ static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
- dw_handle_msi_irq(pp);
-
- return IRQ_HANDLED;
+ return dw_handle_msi_irq(pp);
}
static void exynos_pcie_msi_init(struct pcie_port *pp)
@@ -511,7 +509,8 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
-static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+static int __init add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
{
int ret;
@@ -568,10 +567,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
GFP_KERNEL);
- if (!exynos_pcie) {
- dev_err(&pdev->dev, "no memory for exynos pcie\n");
+ if (!exynos_pcie)
return -ENOMEM;
- }
pp = &exynos_pcie->pp;
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
new file mode 100644
index 000000000000..44fe6aa6a43f
--- /dev/null
+++ b/drivers/pci/host/pci-host-generic.c
@@ -0,0 +1,388 @@
+/*
+ * Simple, generic PCI host controller driver targetting firmware-initialised
+ * systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+struct gen_pci_cfg_bus_ops {
+ u32 bus_shift;
+ void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
+};
+
+struct gen_pci_cfg_windows {
+ struct resource res;
+ struct resource bus_range;
+ void __iomem **win;
+
+ const struct gen_pci_cfg_bus_ops *ops;
+};
+
+struct gen_pci {
+ struct pci_host_bridge host;
+ struct gen_pci_cfg_windows cfg;
+ struct list_head resources;
+};
+
+static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range.start;
+
+ return pci->cfg.win[idx] + ((devfn << 8) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
+ .bus_shift = 16,
+ .map_bus = gen_pci_map_cfg_bus_cam,
+};
+
+static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range.start;
+
+ return pci->cfg.win[idx] + ((devfn << 12) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
+ .bus_shift = 20,
+ .map_bus = gen_pci_map_cfg_bus_ecam,
+};
+
+static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+
+ addr = pci->cfg.ops->map_bus(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+
+ addr = pci->cfg.ops->map_bus(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops gen_pci_ops = {
+ .read = gen_pci_config_read,
+ .write = gen_pci_config_write,
+};
+
+static const struct of_device_id gen_pci_of_match[] = {
+ { .compatible = "pci-host-cam-generic",
+ .data = &gen_pci_cfg_cam_bus_ops },
+
+ { .compatible = "pci-host-ecam-generic",
+ .data = &gen_pci_cfg_ecam_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
+
+static int gen_pci_calc_io_offset(struct device *dev,
+ struct of_pci_range *range,
+ struct resource *res,
+ resource_size_t *offset)
+{
+ static atomic_t wins = ATOMIC_INIT(0);
+ int err, idx, max_win;
+ unsigned int window;
+
+ if (!PAGE_ALIGNED(range->cpu_addr))
+ return -EINVAL;
+
+ max_win = (IO_SPACE_LIMIT + 1) / SZ_64K;
+ idx = atomic_inc_return(&wins);
+ if (idx > max_win)
+ return -ENOSPC;
+
+ window = (idx - 1) * SZ_64K;
+ err = pci_ioremap_io(window, range->cpu_addr);
+ if (err)
+ return err;
+
+ of_pci_range_to_resource(range, dev->of_node, res);
+ res->start = window;
+ res->end = res->start + range->size - 1;
+ *offset = window - range->pci_addr;
+ return 0;
+}
+
+static int gen_pci_calc_mem_offset(struct device *dev,
+ struct of_pci_range *range,
+ struct resource *res,
+ resource_size_t *offset)
+{
+ of_pci_range_to_resource(range, dev->of_node, res);
+ *offset = range->cpu_addr - range->pci_addr;
+ return 0;
+}
+
+static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
+{
+ struct pci_host_bridge_window *win;
+
+ list_for_each_entry(win, &pci->resources, list)
+ release_resource(win->res);
+
+ pci_free_resource_list(&pci->resources);
+}
+
+static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int err, res_valid = 0;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ struct resource *parent, *res;
+ resource_size_t offset;
+ u32 restype = range.flags & IORESOURCE_TYPE_BITS;
+
+ res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto out_release_res;
+ }
+
+ switch (restype) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = gen_pci_calc_io_offset(dev, &range, res, &offset);
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ err = gen_pci_calc_mem_offset(dev, &range, res, &offset);
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH || err);
+ break;
+ default:
+ err = -EINVAL;
+ continue;
+ }
+
+ if (err) {
+ dev_warn(dev,
+ "error %d: failed to add resource [type 0x%x, %lld bytes]\n",
+ err, restype, range.size);
+ continue;
+ }
+
+ err = request_resource(parent, res);
+ if (err)
+ goto out_release_res;
+
+ pci_add_resource_offset(&pci->resources, res, offset);
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+}
+
+static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+{
+ int err;
+ u8 bus_max;
+ resource_size_t busn;
+ struct resource *bus_range;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+
+ if (of_pci_parse_bus_range(np, &pci->cfg.bus_range))
+ pci->cfg.bus_range = (struct resource) {
+ .name = np->name,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_BUS,
+ };
+
+ err = of_address_to_resource(np, 0, &pci->cfg.res);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range),
+ sizeof(*pci->cfg.win), GFP_KERNEL);
+ if (!pci->cfg.win)
+ return -ENOMEM;
+
+ /* Limit the bus-range to fit within reg */
+ bus_max = pci->cfg.bus_range.start +
+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
+ pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end,
+ bus_max);
+
+ /* Map our Configuration Space windows */
+ if (!devm_request_mem_region(dev, pci->cfg.res.start,
+ resource_size(&pci->cfg.res),
+ "Configuration Space"))
+ return -ENOMEM;
+
+ bus_range = &pci->cfg.bus_range;
+ for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
+ u32 idx = busn - bus_range->start;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
+
+ pci->cfg.win[idx] = devm_ioremap(dev,
+ pci->cfg.res.start + busn * sz,
+ sz);
+ if (!pci->cfg.win[idx])
+ return -ENOMEM;
+ }
+
+ /* Register bus resource */
+ pci_add_resource(&pci->resources, bus_range);
+ return 0;
+}
+
+static int gen_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct gen_pci *pci = sys->private_data;
+ list_splice_init(&pci->resources, &sys->resources);
+ return 1;
+}
+
+static int gen_pci_probe(struct platform_device *pdev)
+{
+ int err;
+ const char *type;
+ const struct of_device_id *of_id;
+ const int *prop;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ struct hw_pci hw = {
+ .nr_controllers = 1,
+ .private_data = (void **)&pci,
+ .setup = gen_pci_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &gen_pci_ops,
+ };
+
+ if (!pci)
+ return -ENOMEM;
+
+ type = of_get_property(np, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+
+ of_id = of_match_node(gen_pci_of_match, np);
+ pci->cfg.ops = of_id->data;
+ pci->host.dev.parent = dev;
+ INIT_LIST_HEAD(&pci->host.windows);
+ INIT_LIST_HEAD(&pci->resources);
+
+ /* Parse our PCI ranges and request their resources */
+ err = gen_pci_parse_request_of_pci_ranges(pci);
+ if (err)
+ return err;
+
+ /* Parse and map our Configuration Space windows */
+ err = gen_pci_parse_map_cfg_windows(pci);
+ if (err) {
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+ }
+
+ pci_common_init_dev(dev, &hw);
+ return 0;
+}
+
+static struct platform_driver gen_pci_driver = {
+ .driver = {
+ .name = "pci-host-generic",
+ .owner = THIS_MODULE,
+ .of_match_table = gen_pci_of_match,
+ },
+ .probe = gen_pci_probe,
+};
+module_platform_driver(gen_pci_driver);
+
+MODULE_DESCRIPTION("Generic PCI host driver");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index ee082509b0ba..a5645ae4aef0 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -25,6 +25,7 @@
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
+#include <linux/interrupt.h>
#include "pcie-designware.h"
@@ -32,13 +33,9 @@
struct imx6_pcie {
int reset_gpio;
- int power_on_gpio;
- int wake_up_gpio;
- int disable_gpio;
- struct clk *lvds_gate;
- struct clk *sata_ref_100m;
- struct clk *pcie_ref_125m;
- struct clk *pcie_axi;
+ struct clk *pcie_bus;
+ struct clk *pcie_phy;
+ struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
void __iomem *mem_base;
@@ -231,36 +228,27 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
int ret;
- if (gpio_is_valid(imx6_pcie->power_on_gpio))
- gpio_set_value(imx6_pcie->power_on_gpio, 1);
-
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
- if (ret) {
- dev_err(pp->dev, "unable to enable sata_ref_100m\n");
- goto err_sata_ref;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
- goto err_pcie_ref;
+ dev_err(pp->dev, "unable to enable pcie_phy clock\n");
+ goto err_pcie_phy;
}
- ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+ ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
- dev_err(pp->dev, "unable to enable lvds_gate\n");
- goto err_lvds_gate;
+ dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+ goto err_pcie_bus;
}
- ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+ ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_axi\n");
- goto err_pcie_axi;
+ dev_err(pp->dev, "unable to enable pcie clock\n");
+ goto err_pcie;
}
/* allow the clocks to stabilize */
@@ -274,13 +262,11 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
}
return 0;
-err_pcie_axi:
- clk_disable_unprepare(imx6_pcie->lvds_gate);
-err_lvds_gate:
- clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
-err_pcie_ref:
- clk_disable_unprepare(imx6_pcie->sata_ref_100m);
-err_sata_ref:
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
return ret;
}
@@ -329,6 +315,13 @@ static int imx6_pcie_wait_for_link(struct pcie_port *pp)
return 0;
}
+static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
static int imx6_pcie_start_link(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
@@ -403,6 +396,9 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
imx6_pcie_start_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
}
static void imx6_pcie_reset_phy(struct pcie_port *pp)
@@ -487,15 +483,25 @@ static struct pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int imx6_add_pcie_port(struct pcie_port *pp,
+static int __init imx6_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
int ret;
- pp->irq = platform_get_irq(pdev, 0);
- if (!pp->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ imx6_pcie_msi_handler,
+ IRQF_SHARED, "mx6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return -ENODEV;
+ }
}
pp->root_bus_nr = -1;
@@ -546,69 +552,26 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
}
}
- imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
- if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev,
- imx6_pcie->power_on_gpio,
- GPIOF_OUT_INIT_LOW,
- "PCIe power enable");
- if (ret) {
- dev_err(&pdev->dev, "unable to get power-on gpio\n");
- return ret;
- }
- }
-
- imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
- if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev,
- imx6_pcie->wake_up_gpio,
- GPIOF_IN,
- "PCIe wake up");
- if (ret) {
- dev_err(&pdev->dev, "unable to get wake-up gpio\n");
- return ret;
- }
- }
-
- imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
- if (gpio_is_valid(imx6_pcie->disable_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev,
- imx6_pcie->disable_gpio,
- GPIOF_OUT_INIT_HIGH,
- "PCIe disable endpoint");
- if (ret) {
- dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
- return ret;
- }
- }
-
/* Fetch clocks */
- imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
- if (IS_ERR(imx6_pcie->lvds_gate)) {
- dev_err(&pdev->dev,
- "lvds_gate clock select missing or invalid\n");
- return PTR_ERR(imx6_pcie->lvds_gate);
- }
-
- imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
- if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+ imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy)) {
dev_err(&pdev->dev,
- "sata_ref_100m clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->sata_ref_100m);
+ "pcie_phy clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_phy);
}
- imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
- if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+ imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(imx6_pcie->pcie_bus)) {
dev_err(&pdev->dev,
- "pcie_ref_125m clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_ref_125m);
+ "pcie_bus clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_bus);
}
- imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
- if (IS_ERR(imx6_pcie->pcie_axi)) {
+ imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(imx6_pcie->pcie)) {
dev_err(&pdev->dev,
- "pcie_axi clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_axi);
+ "pcie clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie);
}
/* Grab GPR config register range */
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 4fe349dcaf59..3ef854f5a5b5 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -99,6 +99,7 @@ struct rcar_pci_priv {
struct resource io_res;
struct resource mem_res;
struct resource *cfg_res;
+ unsigned busnr;
int irq;
unsigned long window_size;
};
@@ -318,8 +319,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
- /* Setup bus number based on platform device id */
- sys->busnr = to_platform_device(priv->dev)->id;
+ /* Setup bus number based on platform device id / of bus-range */
+ sys->busnr = priv->busnr;
return 1;
}
@@ -372,6 +373,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_size = SZ_1G;
+ if (pdev->dev.of_node) {
+ struct resource busnr;
+ int ret;
+
+ ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse bus-range\n");
+ return ret;
+ }
+
+ priv->busnr = busnr.start;
+ if (busnr.end != busnr.start)
+ dev_warn(&pdev->dev, "only one bus number supported\n");
+ } else {
+ priv->busnr = pdev->id;
+ }
+
hw_private[0] = priv;
memset(&hw, 0, sizeof(hw));
hw.nr_controllers = ARRAY_SIZE(hw_private);
@@ -383,11 +401,20 @@ static int rcar_pci_probe(struct platform_device *pdev)
return 0;
}
+static struct of_device_id rcar_pci_of_match[] = {
+ { .compatible = "renesas,pci-r8a7790", },
+ { .compatible = "renesas,pci-r8a7791", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
+
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
+ .of_match_table = rcar_pci_of_match,
},
.probe = rcar_pci_probe,
};
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c4e373294476..e3bf9e6d5d9a 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -156,15 +156,17 @@ static struct irq_chip dw_msi_irq_chip = {
};
/* MSI int handler */
-void dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
unsigned long val;
int i, pos, irq;
+ irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
(u32 *)&val);
if (val) {
+ ret = IRQ_HANDLED;
pos = 0;
while ((pos = find_next_bit(&val, 32, pos)) != 32) {
irq = irq_find_mapping(pp->irq_domain,
@@ -177,6 +179,8 @@ void dw_handle_msi_irq(struct pcie_port *pp)
}
}
}
+
+ return ret;
}
void dw_pcie_msi_init(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 3063b3594d88..a169d22d517e 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -68,7 +68,7 @@ struct pcie_host_ops {
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
-void dw_handle_msi_irq(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
new file mode 100644
index 000000000000..8e06124aa80f
--- /dev/null
+++ b/drivers/pci/host/pcie-rcar.c
@@ -0,0 +1,1008 @@
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "rcar-pcie"
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define CONFIG_SEND_ENABLE (1 << 31)
+#define TYPE0 (0 << 8)
+#define TYPE1 (1 << 8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define CFINIT 1
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE 1
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST (1 << 4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE 1
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH (1 << 3)
+#define LAM_64BIT (1 << 2)
+#define LAR_ENABLE (1 << 1)
+
+/* PCIe address reg & mask */
+#define PCIEPARL(x) (0x03400 + ((x) * 0x20))
+#define PCIEPARH(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE (1 << 31)
+#define IO_SPACE (1 << 8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR1 0x011004
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define MACCTLR 0x011058
+#define SCRAMBLE_DISABLE (1 << 27)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD (1 << 16)
+#define PHY_ACK (1 << 24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+#define H1_PCIEPHYSR 0x040018
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0)+(x))
+#define RPMCAP(x) (PMCAP(0)+(x))
+#define REXPCAP(x) (EXPCAP(0)+(x))
+#define RVCCAP(x) (VCCAP(0)+(x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+
+#define PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct msi_chip chip;
+ unsigned long pages;
+ struct mutex lock;
+ int irq1;
+ int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip)
+{
+ return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct resource res[PCI_MAX_RESOURCES];
+ struct resource busn;
+ int root_bus_nr;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+};
+
+static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
+{
+ return readl(pcie->base + reg);
+}
+
+enum {
+ PCI_ACCESS_READ,
+ PCI_ACCESS_WRITE,
+};
+
+static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+{
+ int shift = 8 * (where & 3);
+ u32 val = pci_read_reg(pcie, where & ~3);
+
+ val &= ~(mask << shift);
+ val |= data << shift;
+ pci_write_reg(pcie, val, where & ~3);
+}
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ int shift = 8 * (where & 3);
+ u32 val = pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie *pcie,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ int dev, func, reg, index;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller initiated target transfer to its own config space
+ * result in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == PCI_ACCESS_READ) {
+ *data = pci_read_reg(pcie, PCICONF(index));
+ } else {
+ /* Keep an eye out for changes to the root bus number */
+ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = *data & 0xff;
+
+ pci_write_reg(pcie, *data, PCICONF(index));
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (pcie->root_bus_nr < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear errors */
+ pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
+ PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (bus->parent->number == pcie->root_bus_nr)
+ pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ else
+ pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == PCI_ACCESS_READ)
+ *data = pci_read_reg(pcie, PCIECDR);
+ else
+ pci_write_reg(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
+ return ret;
+ }
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x "
+ "where=0x%04x size=%d val=0x%08lx\n", bus->number,
+ devfn, where, size, (unsigned long)*val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int shift, ret;
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x "
+ "where=0x%04x size=%d val=0x%08lx\n", bus->number,
+ devfn, where, size, (unsigned long)val);
+
+ if (size == 1) {
+ shift = 8 * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = 8 * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else
+ data = val;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static void rcar_pcie_setup_window(int win, struct resource *res,
+ struct rcar_pcie *pcie)
+{
+ /* Setup PCIe address space mappings for each resource */
+ resource_size_t size;
+ u32 mask;
+
+ pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+
+ /*
+ * The PAMR mask is calculated in units of 128Bytes, which
+ * keeps things pretty simple.
+ */
+ size = resource_size(res);
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+
+ pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
+ pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+
+ /* First resource is for IO */
+ mask = PAR_ENABLE;
+ if (res->flags & IORESOURCE_IO)
+ mask |= IO_SPACE;
+
+ pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+}
+
+static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(sys);
+ struct resource *res;
+ int i;
+
+ pcie->root_bus_nr = -1;
+
+ /* Setup PCI resources */
+ for (i = 0; i < PCI_MAX_RESOURCES; i++) {
+
+ res = &pcie->res[i];
+ if (!res->flags)
+ continue;
+
+ rcar_pcie_setup_window(i, res, pcie);
+
+ if (res->flags & IORESOURCE_IO)
+ pci_ioremap_io(nr * SZ_64K, res->start);
+ else
+ pci_add_resource(&sys->resources, res);
+ }
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ return 1;
+}
+
+static void rcar_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+
+ bus->msi = &pcie->msi.chip;
+ }
+}
+
+struct hw_pci rcar_pci = {
+ .setup = rcar_pcie_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &rcar_pcie_ops,
+ .add_bus = rcar_pcie_add_bus,
+};
+
+static void rcar_pcie_enable(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+
+ rcar_pci.nr_controllers = 1;
+ rcar_pci.private_data = (void **)&pcie;
+
+ pci_common_init_dev(&pdev->dev, &rcar_pci);
+#ifdef CONFIG_PCI_DOMAINS
+ rcar_pci.domain++;
+#endif
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, unsigned int addr,
+ unsigned int lane, unsigned int data)
+{
+ unsigned long phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ while (timeout--) {
+ if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ pci_write_reg(pcie, 1, PCIEMSR);
+
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+ /*
+ * Setup Secondary Bus Number & Subordinate Bus Number, even though
+ * they aren't used, to avoid bridge being detected as broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0);
+
+ /* Enable MAC data scrambling. */
+ rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
+
+ /* Finish initialization - establish a PCI Express link */
+ pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+ /* This will timeout if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ /* Enable slave Bus Mastering */
+ rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK,
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+ while (timeout--) {
+ if (pci_read_reg(pcie, H1_PCIEPHYSR))
+ return rcar_pcie_hw_init(pcie);
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+ mutex_lock(&chip->lock);
+ clear_bit(irq, chip->used);
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie *pcie = data;
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long reg;
+
+ reg = pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ unsigned int irq;
+
+ /* clear the interrupt */
+ pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(pcie->dev, "unhandled MSI\n");
+ } else {
+ /* Unknown MSI, just clear it */
+ dev_dbg(pcie->dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = rcar_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq) {
+ rcar_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
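
The address/data pair composed here closes the loop with rcar_pcie_msi_irq() above: the endpoint writes msg.data (the hardware vector number) to the doorbell address held in PCIEMSIALR/PCIEMSIAUR, the controller latches the matching bit in PCIEMSIFR, and the shared handler scans, acknowledges, and dispatches it.
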
+
+static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+ .name = "R-Car PCIe MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = rcar_msi_map,
+};
+
+static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long base;
+ int err;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /* Two irqs are reserved for MSI, but they also carry non-MSI interrupts */
+ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* setup MSI data target */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ base = virt_to_phys((void *)msi->pages);
+
+ pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+ pci_write_reg(pcie, 0, PCIEMSIAUR);
+
+ /* enable all MSI interrupts */
+ pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+
+ return 0;
+
+err:
+ irq_domain_remove(msi->domain);
+ return err;
+}
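
Note that the page allocated as the MSI data target is never touched by the CPU; it exists only to donate a bus address that will not collide with real memory traffic. Endpoint writes to that address are intercepted by the controller and surface as PCIEMSIFR status bits rather than reaching RAM.
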
+
+static int rcar_pcie_get_resources(struct platform_device *pdev,
+ struct rcar_pcie *pcie)
+{
+ struct resource res;
+ int err, i;
+
+ err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(pcie->clk)) {
+ dev_err(pcie->dev, "cannot get platform clock\n");
+ return PTR_ERR(pcie->clk);
+ }
+ err = clk_prepare_enable(pcie->clk);
+ if (err)
+ goto fail_clk;
+
+ pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(pcie->bus_clk)) {
+ dev_err(pcie->dev, "cannot get pcie bus clock\n");
+ err = PTR_ERR(pcie->bus_clk);
+ goto fail_clk;
+ }
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err)
+ goto err_map_reg;
+
+ /* irq_of_parse_and_map() returns 0, not a negative errno, on failure */
+ i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq2 = i;
+
+ pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(pcie->base)) {
+ err = PTR_ERR(pcie->base);
+ goto err_map_reg;
+ }
+
+ return 0;
+
+err_map_reg:
+ clk_disable_unprepare(pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(pcie->clk);
+
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct of_pci_range *range,
+ int *index)
+{
+ u64 restype = range->flags;
+ u64 cpu_addr = range->cpu_addr;
+ u64 cpu_end = range->cpu_addr + range->size;
+ u64 pci_addr = range->pci_addr;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size;
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ /*
+ * If the size of the range is larger than the alignment of the start
+ * address, we have to use multiple entries to perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+ size = min(range->size, alignment);
+ } else {
+ size = range->size;
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ while (cpu_addr < cpu_end) {
+ /*
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
+ */
+ pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+ pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+
+ pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+ pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+ pci_write_reg(pcie, 0, PCIELAMR(idx+1));
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+
+ if (idx > MAX_NR_INBOUND_MAPS) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ }
+ *index = idx;
+
+ return 0;
+}
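
Illustrative arithmetic for the splitting above (assumed numbers): a 3 GiB range starting at CPU address 0x40000000 gives __ffs64() = 30, i.e. 1 GiB natural alignment, so each pass maps min(3 GiB, 1 GiB) = 1 GiB and the loop runs three times, consuming six PCIEPRAR/PCIELAR/PCIELAMR slots (two per pass for the 64-bit split).
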
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
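
An assumed example of the property this parses: with na = 3, ns = 2 and a parent bus whose #address-cells = 1 (so pna = 1), each "dma-ranges" entry is 3 + 1 + 2 = 6 cells:

	dma-ranges = <0x42000000 0 0x40000000  0x40000000  0 0x80000000>;
	/*            |-- PCI address (3) --|   CPU (1)    |- size (2) -| */

i.e. a 2 GiB prefetchable region at PCI address 0x40000000, mapped 1:1 to the CPU address.
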
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int index = 0;
+ int err;
+
+ if (pci_dma_range_parser_init(&parser, np))
+ return -EINVAL;
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+ dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+
+ err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie;
+ unsigned int data;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ const struct of_device_id *of_id;
+ int err, win = 0;
+ int (*hw_init_fn)(struct rcar_pcie *);
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the bus range */
+ if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) {
+ dev_err(&pdev->dev, "failed to parse bus-range property\n");
+ return -EINVAL;
+ }
+
+ if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) {
+ dev_err(&pdev->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ err = rcar_pcie_get_resources(pdev, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ of_pci_range_to_resource(&range, pdev->dev.of_node,
+ &pcie->res[win++]);
+
+ if (win > PCI_MAX_RESOURCES)
+ break;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+ hw_init_fn = of_id->data;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = hw_init_fn(pcie);
+ if (err) {
+ dev_info(&pdev->dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = pci_read_reg(pcie, MACSR);
+ dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ rcar_pcie_enable(pcie);
+
+ return 0;
+}
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rcar_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+module_platform_driver(rcar_pcie_driver);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
index 6258dc260d9f..c68366cee6b7 100644
--- a/drivers/pci/hotplug-pci.c
+++ b/drivers/pci/hotplug-pci.c
@@ -4,7 +4,7 @@
#include <linux/export.h>
#include "pci.h"
-int __ref pci_hp_add_bridge(struct pci_dev *dev)
+int pci_hp_add_bridge(struct pci_dev *dev)
{
struct pci_bus *parent = dev->bus;
int pass, busnr, start = parent->busn_res.start;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index bccc27ee1030..75e178330215 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -41,7 +41,6 @@
#define pr_fmt(fmt) "acpiphp_glue: " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -501,7 +500,7 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
* This function should be called per *physical slot*,
* not per each slot object in ACPI namespace.
*/
-static void __ref enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot)
{
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
@@ -516,8 +515,7 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+ if (pci_is_bridge(dev)) {
max = pci_scan_bridge(bus, dev, max, pass);
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 8c1464851768..f6ef64c2ccb5 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot)
* Device configuration functions
*/
-int __ref cpci_configure_slot(struct slot *slot)
+int cpci_configure_slot(struct slot *slot)
{
struct pci_dev *dev;
struct pci_bus *parent;
@@ -289,8 +289,7 @@ int __ref cpci_configure_slot(struct slot *slot)
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ if (pci_is_bridge(dev))
pci_hp_add_bridge(dev);
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 11845b796799..f593585f2784 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -709,7 +709,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
temp = temp->next;
}
- temp->next = max->next;
+ if (temp)
+ temp->next = max->next;
}
max->next = NULL;
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 76ba8a1c774d..9600a392eaae 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -34,7 +34,6 @@
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/init.h>
#include <asm/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 8a66866b8cf1..8e9012dca450 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -127,7 +127,7 @@ struct controller {
#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
-#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
+#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
int pciehp_sysfs_enable_slot(struct slot *slot);
int pciehp_sysfs_disable_slot(struct slot *slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index d7d058fa19a4..1463412cf7f8 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -159,6 +159,8 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
if (!ctrl->no_cmd_complete) {
/*
* After 1 sec and CMD_COMPLETED still not set, just
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 1b533060ce65..b6cb1df67097 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -62,8 +62,7 @@ int pciehp_configure_device(struct slot *p_slot)
}
list_for_each_entry(dev, &parent->devices, bus_list)
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ if (pci_is_bridge(dev))
pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 16f920352317..e246a10a0d2c 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -160,8 +160,7 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- if (dev->bus)
- pcie_bus_configure_settings(dev->bus);
+ pcie_bus_configure_settings(dev->bus);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 4fcdeedda31b..7660232ef460 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -157,8 +157,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
}
/* Scan below the new bridge */
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
of_scan_pci_bridge(dev);
/* Map IO space for child bus, which may or may not succeed */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 4796c15fba94..984d708552f6 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -223,16 +223,16 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
type_tmp = (char *) &types[1];
/* Iterate through parent properties, looking for my-drc-index */
- for (i = 0; i < indexes[0]; i++) {
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
if ((unsigned int) indexes[i + 1] == *my_index) {
if (drc_name)
*drc_name = name_tmp;
if (drc_type)
*drc_type = type_tmp;
if (drc_index)
- *drc_index = *my_index;
+ *drc_index = be32_to_cpu(*my_index);
if (drc_power_domain)
- *drc_power_domain = domains[i+1];
+ *drc_power_domain = be32_to_cpu(domains[i+1]);
return 0;
}
name_tmp += (strlen(name_tmp) + 1);
@@ -321,16 +321,19 @@ int rpaphp_add_slot(struct device_node *dn)
/* register PCI devices */
name = (char *) &names[1];
type = (char *) &types[1];
- for (i = 0; i < indexes[0]; i++) {
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
+ int index;
- slot = alloc_slot_struct(dn, indexes[i + 1], name, power_domains[i + 1]);
+ index = be32_to_cpu(indexes[i + 1]);
+ slot = alloc_slot_struct(dn, index, name,
+ be32_to_cpu(power_domains[i + 1]));
if (!slot)
return -ENOMEM;
slot->type = simple_strtoul(type, NULL, 10);
dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
- indexes[i + 1], name, type);
+ index, name, type);
retval = rpaphp_enable_slot(slot);
if (!retval)
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 8d2ce22151eb..d1332d2f8730 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/init.h>
#include <asm/pci_debug.h>
#include <asm/sclp.h>
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 2bf69fe1926c..9202d133485c 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,7 +34,7 @@
#include "../pci.h"
#include "shpchp.h"
-int __ref shpchp_configure_device(struct slot *p_slot)
+int shpchp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
struct controller *ctrl = p_slot->ctrl;
@@ -64,8 +64,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
list_for_each_entry(dev, &parent->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ if (pci_is_bridge(dev))
pci_hp_add_bridge(dev);
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index de7a74782f92..cb6f24740ee3 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -106,7 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
- rc = pci_bus_add_device(virtfn);
+ pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 955ab7990c5b..27a7e67ddfe4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
@@ -544,22 +543,18 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
if (!msi_attrs)
return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
- char *name = kmalloc(20, GFP_KERNEL);
- if (!name)
- goto error_attrs;
-
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr) {
- kfree(name);
+ if (!msi_dev_attr)
goto error_attrs;
- }
+ msi_attrs[count] = &msi_dev_attr->attr;
- sprintf(name, "%d", entry->irq);
sysfs_attr_init(&msi_dev_attr->attr);
- msi_dev_attr->attr.name = name;
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
msi_dev_attr->attr.mode = S_IRUGO;
msi_dev_attr->show = msi_mode_show;
- msi_attrs[count] = &msi_dev_attr->attr;
++count;
}
@@ -883,50 +878,6 @@ int pci_msi_vec_count(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_msi_vec_count);
-/**
- * pci_enable_msi_block - configure device's MSI capability structure
- * @dev: device to configure
- * @nvec: number of interrupts to configure
- *
- * Allocate IRQs for a device with the MSI capability.
- * This function returns a negative errno if an error occurs. If it
- * is unable to allocate the number of interrupts requested, it returns
- * the number of interrupts it might be able to allocate. If it successfully
- * allocates at least the number of interrupts requested, it returns 0 and
- * updates the @dev's irq member to the lowest new interrupt number; the
- * other interrupt numbers allocated to this device are consecutive.
- */
-int pci_enable_msi_block(struct pci_dev *dev, int nvec)
-{
- int status, maxvec;
-
- if (dev->current_state != PCI_D0)
- return -EINVAL;
-
- maxvec = pci_msi_vec_count(dev);
- if (maxvec < 0)
- return maxvec;
- if (nvec > maxvec)
- return maxvec;
-
- status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
- if (status)
- return status;
-
- WARN_ON(!!dev->msi_enabled);
-
- /* Check whether driver already requested MSI-X irqs */
- if (dev->msix_enabled) {
- dev_info(&dev->dev, "can't enable MSI "
- "(MSI-X already enabled)\n");
- return -EINVAL;
- }
-
- status = msi_capability_init(dev, nvec);
- return status;
-}
-EXPORT_SYMBOL(pci_enable_msi_block);
-
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
@@ -1132,14 +1083,45 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- int nvec = maxvec;
+ int nvec;
int rc;
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ WARN_ON(!!dev->msi_enabled);
+
+ /* Check whether driver already requested MSI-X irqs */
+ if (dev->msix_enabled) {
+ dev_info(&dev->dev,
+ "can't enable MSI (MSI-X already enabled)\n");
+ return -EINVAL;
+ }
+
if (maxvec < minvec)
return -ERANGE;
+ nvec = pci_msi_vec_count(dev);
+ if (nvec < 0)
+ return nvec;
+ else if (nvec < minvec)
+ return -EINVAL;
+ else if (nvec > maxvec)
+ nvec = maxvec;
+
+ do {
+ rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
do {
- rc = pci_enable_msi_block(dev, nvec);
+ rc = msi_capability_init(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
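
For callers, the net effect of folding pci_enable_msi_block() into pci_enable_msi_range() is a single negotiate-and-retry entry point. A hypothetical driver-side use:

	/* Ask for 4..16 MSI vectors; settle for whatever the loop negotiates. */
	int nvec = pci_enable_msi_range(pdev, 4, 16);
	if (nvec < 0)
		return nvec;	/* fewer than 4 available: fall back to INTx */
	/* nvec consecutive vectors granted, starting at pdev->irq */
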
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index f49abef88485..ca4927ba8433 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -309,13 +309,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
- /*
- * pci_is_bridge() is not suitable here, because pci_dev->subordinate
- * is set only after acpi_pci_find_device() has been called for the
- * given device.
- */
- check_children = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
- || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
+ check_children = pci_is_bridge(pci_dev);
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d911e0c1f359..837d71f5390b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -107,7 +107,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
subdevice=PCI_ANY_ID, class=0, class_mask=0;
unsigned long driver_data=0;
int fields=0;
- int retval;
+ int retval = 0;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
&vendor, &device, &subvendor, &subdevice,
@@ -115,6 +115,26 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
if (fields < 2)
return -EINVAL;
+ if (fields != 7) {
+ struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->vendor = vendor;
+ pdev->device = device;
+ pdev->subsystem_vendor = subvendor;
+ pdev->subsystem_device = subdevice;
+ pdev->class = class;
+
+ if (pci_match_id(pdrv->id_table, pdev))
+ retval = -EEXIST;
+
+ kfree(pdev);
+
+ if (retval)
+ return retval;
+ }
+
/* Only accept driver_data values that match an existing id_table
entry */
if (ids) {
@@ -216,6 +236,13 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
return NULL;
}
+static const struct pci_device_id pci_device_id_any = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+};
+
/**
* pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
* @drv: the PCI driver to match against
@@ -229,18 +256,30 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
struct pci_dev *dev)
{
struct pci_dynid *dynid;
+ const struct pci_device_id *found_id = NULL;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
/* Look at the dynamic ids first, before the static ones */
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (pci_match_one_device(&dynid->id, dev)) {
- spin_unlock(&drv->dynids.lock);
- return &dynid->id;
+ found_id = &dynid->id;
+ break;
}
}
spin_unlock(&drv->dynids.lock);
- return pci_match_id(drv->id_table, dev);
+ if (!found_id)
+ found_id = pci_match_id(drv->id_table, dev);
+
+ /* driver_override will always match, send a dummy id */
+ if (!found_id && dev->driver_override)
+ found_id = &pci_device_id_any;
+
+ return found_id;
}
struct drv_dev_and_id {
@@ -580,14 +619,14 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
pci_fixup_device(pci_fixup_resume, pci_dev);
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_enable_wake(pci_dev, PCI_D0, false);
}
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
/* Disable non-bridge devices without PM support */
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_disable_enabled_device(pci_dev);
}
@@ -717,7 +756,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -971,7 +1010,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
return error;
}
- if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
pci_prepare_to_sleep(pci_dev);
/*
@@ -1325,8 +1364,6 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENODEV;
pdev = to_pci_dev(dev);
- if (!pdev)
- return -ENODEV;
if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
return -ENOMEM;
@@ -1347,6 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
(u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
(u8)(pdev->class)))
return -ENOMEM;
+
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 4e0acefb7565..84c350994b06 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -416,6 +417,20 @@ static ssize_t d3cold_allowed_show(struct device *dev,
static DEVICE_ATTR_RW(d3cold_allowed);
#endif
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct device_node *np = pci_device_to_OF_node(pdev);
+
+ if (np == NULL || np->full_name == NULL)
+ return 0;
+ return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
struct device_attribute *attr,
@@ -499,6 +514,45 @@ static struct device_attribute sriov_numvfs_attr =
sriov_numvfs_show, sriov_numvfs_store);
#endif /* CONFIG_PCI_IOV */
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *driver_override, *old = pdev->driver_override, *cp;
+
+ if (count > PATH_MAX)
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ pdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ pdev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%s\n", pdev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
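
With this attribute in place, userspace can pin a device to one driver before (re)binding, for example by writing "vfio-pci" to /sys/bus/pci/devices/<BDF>/driver_override and then re-probing through the bus's drivers_probe file; writing an empty string drops the override.
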
+
static struct attribute *pci_dev_attrs[] = {
&dev_attr_resource.attr,
&dev_attr_vendor.attr,
@@ -521,6 +575,10 @@ static struct attribute *pci_dev_attrs[] = {
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
&dev_attr_d3cold_allowed.attr,
#endif
+#ifdef CONFIG_OF
+ &dev_attr_devspec.attr,
+#endif
+ &dev_attr_driver_override.attr,
NULL,
};
@@ -1255,11 +1313,6 @@ static struct bin_attribute pcie_config_attr = {
.write = pci_write_config,
};
-int __weak pcibios_add_platform_entries(struct pci_dev *dev)
-{
- return 0;
-}
-
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -1375,11 +1428,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
pdev->rom_attr = attr;
}
- /* add platform-specific attributes */
- retval = pcibios_add_platform_entries(pdev);
- if (retval)
- goto err_rom_file;
-
/* add sysfs entries for various capabilities */
retval = pci_create_capabilities_sysfs(pdev);
if (retval)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 759475ef6ff3..7ae7aa0166b6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1468,6 +1468,17 @@ void __weak pcibios_release_device(struct pci_dev *dev) {}
*/
void __weak pcibios_disable_device (struct pci_dev *dev) {}
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
@@ -3306,8 +3317,27 @@ static void pci_dev_unlock(struct pci_dev *dev)
pci_cfg_access_unlock(dev);
}
+/**
+ * pci_reset_notify - notify device driver of reset
+ * @dev: device to be notified of reset
+ * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
+ * completed
+ *
+ * Must be called prior to device access being disabled and after device
+ * access is restored.
+ */
+static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+{
+ const struct pci_error_handlers *err_handler =
+ dev->driver ? dev->driver->err_handler : NULL;
+ if (err_handler && err_handler->reset_notify)
+ err_handler->reset_notify(dev, prepare);
+}
+
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
+ pci_reset_notify(dev, true);
+
/*
* Wake-up device prior to save. PM registers default to D0 after
* reset and a simple register restore doesn't reliably return
@@ -3329,6 +3359,7 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
static void pci_dev_restore(struct pci_dev *dev)
{
pci_restore_state(dev);
+ pci_reset_notify(dev, false);
}
static int pci_dev_reset(struct pci_dev *dev, int probe)
@@ -3345,6 +3376,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
return rc;
}
+
/**
* __pci_reset_function - reset a PCI device function
* @dev: PCI device to reset
@@ -4126,7 +4158,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
u16 cmd;
int rc;
- WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
+ WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
/* ARCH specific VGA enables */
rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6bd082299e31..0601890db22d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -77,7 +77,7 @@ static inline void pci_wakeup_event(struct pci_dev *dev)
pm_wakeup_event(&dev->dev, 100);
}
-static inline bool pci_is_bridge(struct pci_dev *pci_dev)
+static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
}
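
The rename is the point of this hunk: pci_has_subordinate() asks whether a child bus is actually present at runtime, which is what the PM paths want, while the pci_is_bridge() name is freed up for the header-type test that the hotplug conversions elsewhere in this series rely on.
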
@@ -201,11 +201,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
-void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
-void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct list_head *realloc_head,
- struct list_head *fail_head);
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 986f8eadfd39..2f0ce668a775 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -99,7 +99,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
for (i = 0; i < nr_entries; i++)
msix_entries[i].entry = i;
- status = pci_enable_msix(dev, msix_entries, nr_entries);
+ status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
if (status)
goto Exit;
@@ -171,7 +171,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
pci_disable_msix(dev);
/* Now allocate the MSI-X vectors for real */
- status = pci_enable_msix(dev, msix_entries, nvec);
+ status = pci_enable_msix_exact(dev, msix_entries, nvec);
if (status)
goto Exit;
}
@@ -379,10 +379,13 @@ int pcie_port_device_register(struct pci_dev *dev)
/*
* Initialize service irqs. Don't use service devices that
* require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
*/
status = init_service_irqs(dev, irqs, capabilities);
if (status) {
- capabilities &= PCIE_PORT_SERVICE_VC;
+ capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
if (!capabilities)
goto error_disable;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ef09f5f2fe6c..2bbf5221afb3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -171,9 +171,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int pos)
{
u32 l, sz, mask;
+ u64 l64, sz64, mask64;
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
- bool bar_too_big = false, bar_disabled = false;
+ bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
@@ -226,9 +227,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
if (res->flags & IORESOURCE_MEM_64) {
- u64 l64 = l;
- u64 sz64 = sz;
- u64 mask64 = mask | (u64)~0 << 32;
+ l64 = l;
+ sz64 = sz;
+ mask64 = mask | (u64)~0 << 32;
pci_read_config_dword(dev, pos + 4, &l);
pci_write_config_dword(dev, pos + 4, ~0);
@@ -243,19 +244,22 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (!sz64)
goto fail;
- if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
+ if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
+ sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ res->start = 0;
+ res->end = 0;
bar_too_big = true;
- goto fail;
+ goto out;
}
- if ((sizeof(resource_size_t) < 8) && l) {
- /* Address above 32-bit boundary; disable the BAR */
- pci_write_config_dword(dev, pos, 0);
- pci_write_config_dword(dev, pos + 4, 0);
+ if ((sizeof(dma_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
res->flags |= IORESOURCE_UNSET;
- region.start = 0;
- region.end = sz64;
- bar_disabled = true;
+ res->start = 0;
+ res->end = sz64;
+ bar_too_high = true;
+ goto out;
} else {
region.start = l64;
region.end = l64 + sz64;
@@ -285,11 +289,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
* be claimed by the device.
*/
if (inverted_region.start != region.start) {
- dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
- pos, &region.start);
res->flags |= IORESOURCE_UNSET;
- res->end -= res->start;
res->start = 0;
+ res->end = region.end - region.start;
+ bar_invalid = true;
}
goto out;
@@ -303,8 +306,15 @@ out:
pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
if (bar_too_big)
- dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
- if (res->flags && !bar_disabled)
+ dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+ pos, (unsigned long long) sz64);
+ if (bar_too_high)
+ dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
+ pos, (unsigned long long) l64);
+ if (bar_invalid)
+ dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+ pos, (unsigned long long) region.start);
+ if (res->flags)
dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
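
Gating on sizeof(dma_addr_t) as well as sizeof(resource_size_t) matters on 32-bit configurations: even when the resource type could represent a >4G BAR, a device the CPU cannot address is useless, so the BAR is now flagged IORESOURCE_UNSET for reallocation below 4G instead of being silently zeroed in config space.
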
@@ -465,7 +475,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
if (dev->transparent) {
pci_bus_for_each_resource(child->parent, res, i) {
- if (res) {
+ if (res && res->flags) {
pci_bus_add_resource(child, res,
PCI_SUBTRACTIVE_DECODE);
dev_printk(KERN_DEBUG, &dev->dev,
@@ -719,7 +729,7 @@ add_dev:
return child;
}
-struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
struct pci_bus *child;
@@ -984,6 +994,43 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
/**
+ * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
+ * @dev: PCI device
+ *
+ * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
+ * when forwarding a type1 configuration request the bridge must check that
+ * the extended register address field is zero. The bridge is not permitted
+ * to forward the transactions and must handle it as an Unsupported Request.
+ * Some bridges do not follow this rule and simply drop the extended register
+ * bits, resulting in the standard config space being aliased, every 256
+ * bytes across the entire configuration space. Test for this condition by
+ * comparing the first dword of each potential alias to the vendor/device ID.
+ * Known offenders:
+ * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
+ * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
+ */
+static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ int pos;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+ if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+ || header != tmp)
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
* pci_cfg_space_size - get the configuration space size of the PCI device.
* @dev: PCI device
*
@@ -1001,7 +1048,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
goto fail;
- if (status == 0xffffffff)
+ if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
goto fail;
return PCI_CFG_SPACE_EXP_SIZE;
@@ -1215,6 +1262,7 @@ static void pci_release_dev(struct device *dev)
pci_release_of_node(pci_dev);
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
+ kfree(pci_dev->driver_override);
kfree(pci_dev);
}
@@ -1369,7 +1417,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
WARN_ON(ret < 0);
}
-struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
+struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
@@ -1617,7 +1665,7 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
*/
void pcie_bus_configure_settings(struct pci_bus *bus)
{
- u8 smpss;
+ u8 smpss = 0;
if (!bus->self)
return;
@@ -1670,8 +1718,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
for (pass=0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev, max, pass);
}
@@ -1958,7 +2005,7 @@ EXPORT_SYMBOL(pci_scan_bus);
*
* Returns the max number of subordinate bus discovered.
*/
-unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
unsigned int max;
struct pci_bus *bus = bridge->subordinate;
@@ -1981,7 +2028,7 @@ unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
*
* Returns the max number of subordinate bus discovered.
*/
-unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
+unsigned int pci_rescan_bus(struct pci_bus *bus)
{
unsigned int max;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e7292065a1b1..92e68c7747f7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2954,6 +2954,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
/*
* PCI devices which are on Intel chips can skip the 10ms delay
@@ -2991,6 +2992,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
+/*
+ * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
+ * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
+ *
+ * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
@@ -3453,6 +3462,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
/* Wildcat PCH */
0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
+ /* Patsburg (X79) PCH */
+ 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
};
static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 4a1b972efe7f..8e495bda678f 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -7,7 +7,6 @@
* Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 138bdd6393be..fd9b545c3cf5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -713,12 +713,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
bus resource of a given type. Note: we intentionally skip
the bus resources which have already been assigned (that is,
have non-NULL parent resource). */
-static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
+static struct resource *find_free_bus_resource(struct pci_bus *bus,
+ unsigned long type_mask, unsigned long type)
{
int i;
struct resource *r;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
@@ -815,7 +814,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
resource_size_t add_size, struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
+ struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -907,36 +907,40 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
* @bus : the bus
* @mask: mask the resource flag, then compare it with type
* @type: the type of free resource from bridge
+ * @type2: second match type
+ * @type3: third match type
* @min_size : the minimum memory window that must be allocated
* @add_size : additional optional memory window
* @realloc_head : track the additional memory window on this list
*
* Calculate the size of the bus and minimal alignment which
* guarantees that all child resources fit in this size.
+ *
+ * Returns -ENOSPC if there's no available bus resource of the desired type.
+ * Otherwise, sets the bus resource start/end to indicate the required
+ * size, adds things to realloc_head (if supplied), and returns 0.
*/
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, resource_size_t min_size,
- resource_size_t add_size,
- struct list_head *realloc_head)
+ unsigned long type, unsigned long type2,
+ unsigned long type3,
+ resource_size_t min_size, resource_size_t add_size,
+ struct list_head *realloc_head)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
+ resource_size_t aligns[14]; /* Alignments from 1Mb to 8Gb */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus, type);
- unsigned int mem64_mask = 0;
+ struct resource *b_res = find_free_bus_resource(bus,
+ mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
if (!b_res)
- return 0;
+ return -ENOSPC;
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
- mem64_mask = b_res->flags & IORESOURCE_MEM_64;
- b_res->flags &= ~IORESOURCE_MEM_64;
-
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -944,7 +948,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
- if (r->parent || (r->flags & mask) != type)
+ if (r->parent || ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
@@ -957,10 +963,17 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
}
#endif
- /* For bridges size != alignment */
+ /*
+ * aligns[0] is for 1MB (since bridge memory
+ * windows are always at least 1MB aligned), so
+ * keep "order" from being negative for smaller
+ * resources.
+ */
align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
- if (order > 11) {
+ if (order < 0)
+ order = 0;
+ if (order >= ARRAY_SIZE(aligns)) {
dev_warn(&dev->dev, "disabling BAR %d: %pR "
"(bad alignment %#llx)\n", i, r,
(unsigned long long) align);
@@ -968,15 +981,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
}
size += r_size;
- if (order < 0)
- order = 0;
/* Exclude ranges with size > align from
calculation of the alignment. */
if (r_size == align)
aligns[order] += align;
if (order > max_order)
max_order = order;
- mem64_mask &= r->flags & IORESOURCE_MEM_64;
if (realloc_head)
children_add_size += get_res_add_size(realloc_head, r);
@@ -997,18 +1007,18 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
"%pR to %pR (unused)\n", b_res,
&bus->busn_res);
b_res->flags = 0;
- return 1;
+ return 0;
}
b_res->start = min_align;
b_res->end = size0 + min_align - 1;
- b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
+ b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
"%pR to %pR add_size %llx\n", b_res,
&bus->busn_res, (unsigned long long)size1-size0);
}
- return 1;
+ return 0;
}
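
A worked example for the bucket math above (illustrative): a 2 MiB BAR has 2 MiB alignment, __ffs(0x200000) = 21, so order = 1 and it lands in aligns[1]; a 512 KiB BAR yields 19 - 20 = -1, clamped to 0 (the 1 MiB bucket, matching the minimum bridge window alignment); any BAR needing order >= 14 (8 GiB alignment and up) is disabled as unrepresentable.
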
unsigned long pci_cardbus_resource_alignment(struct resource *res)
@@ -1113,12 +1123,13 @@ handle_done:
;
}
-void __ref __pci_bus_size_bridges(struct pci_bus *bus,
- struct list_head *realloc_head)
+void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
- unsigned long mask, prefmask;
+ unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ struct resource *b_res;
+ int ret;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1152,41 +1163,93 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
additional_io_size = pci_hotplug_io_size;
additional_mem_size = pci_hotplug_mem_size;
}
- /*
- * Follow thru
- */
+ /* Fall through */
default:
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- /* If the bridge supports prefetchable range, size it
- separately. If it doesn't, or its prefetchable window
- has already been allocated by arch code, try
- non-prefetchable range for both types of PCI memory
- resources. */
+
+ /*
+ * If there's a 64-bit prefetchable MMIO window, compute
+ * the size required to put all 64-bit prefetchable
+ * resources in it.
+ */
+ b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask,
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ prefmask |= IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head))
- mask = prefmask; /* Success, size non-prefetch only. */
- else
- additional_mem_size += additional_mem_size;
- pbus_size_mem(bus, mask, IORESOURCE_MEM,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, all non-prefetchable resources
+ * and any 32-bit prefetchable resources will go in
+ * the non-prefetchable window.
+ */
+ if (ret == 0) {
+ mask = prefmask;
+ type2 = prefmask & ~IORESOURCE_MEM_64;
+ type3 = prefmask & ~IORESOURCE_PREFETCH;
+ }
+ }
+
+ /*
+ * If there is no 64-bit prefetchable window, compute the
+ * size required to put all prefetchable resources in the
+ * 32-bit prefetchable window (if there is one).
+ */
+ if (!type2) {
+ prefmask &= ~IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, only non-prefetchable resources
+ * will go in the non-prefetchable window.
+ */
+ if (ret == 0)
+ mask = prefmask;
+ else
+ additional_mem_size += additional_mem_size;
+
+ type2 = type3 = IORESOURCE_MEM;
+ }
+
+ /*
+ * Compute the size required to put everything else in the
+ * non-prefetchable window. This includes:
+ *
+ * - all non-prefetchable resources
+ * - 32-bit prefetchable resources if there's a 64-bit
+ * prefetchable window or no prefetchable window at all
+ * - 64-bit prefetchable resources if there's no
+ * prefetchable window at all
+ *
+ * Note that the strategy in __pci_assign_resource() must
+ * match that used here. Specifically, we cannot put a
+ * 32-bit prefetchable resource in a 64-bit prefetchable
+ * window.
+ */
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
realloc_head ? 0 : additional_mem_size,
additional_mem_size, realloc_head);
break;
}
}
-void __ref pci_bus_size_bridges(struct pci_bus *bus)
+void pci_bus_size_bridges(struct pci_bus *bus)
{
__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct list_head *realloc_head,
- struct list_head *fail_head)
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
@@ -1218,15 +1281,15 @@ void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
}
}
-void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+void pci_bus_assign_resources(const struct pci_bus *bus)
{
__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
-static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
- struct list_head *add_head,
- struct list_head *fail_head)
+static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
+ struct list_head *add_head,
+ struct list_head *fail_head)
{
struct pci_bus *b;
@@ -1257,42 +1320,66 @@ static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
- int idx;
- bool changed = false;
- struct pci_dev *dev;
+ struct pci_dev *dev = bus->self;
struct resource *r;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+ unsigned old_flags = 0;
+ struct resource *b_res;
+ int idx = 1;
- dev = bus->self;
- for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
- idx++) {
- r = &dev->resource[idx];
- if ((r->flags & type_mask) != type)
- continue;
- if (!r->parent)
- continue;
- /*
- * if there are children under that, we should release them
- * all
- */
- release_child_resources(r);
- if (!release_resource(r)) {
- dev_printk(KERN_DEBUG, &dev->dev,
- "resource %d %pR released\n", idx, r);
- /* keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
- r->flags = 0;
- changed = true;
- }
- }
+ b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
+
+ /*
+ * 1. If the I/O port assignment fails, release the bridge I/O port
+ * window.
+ * 2. If the non-prefetchable MMIO assignment fails, release the
+ * bridge non-prefetchable MMIO window.
+ * 3. If the 64-bit prefetchable MMIO assignment fails and the bridge
+ * prefetchable window is 64-bit, release the bridge prefetchable MMIO.
+ * 4. If the prefetchable MMIO assignment fails and the bridge
+ * prefetchable window is 32-bit, release the bridge prefetchable MMIO.
+ * 5. If the prefetchable MMIO assignment fails and the bridge
+ * prefetchable window is unassigned, release the bridge
+ * non-prefetchable MMIO.
+ */
+ if (type & IORESOURCE_IO)
+ idx = 0;
+ else if (!(type & IORESOURCE_PREFETCH))
+ idx = 1;
+ else if ((type & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_MEM_64))
+ idx = 2;
+ else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_PREFETCH))
+ idx = 2;
+ else
+ idx = 1;
+
+ r = &b_res[idx];
+
+ if (!r->parent)
+ return;
+
+ /*
+ * if there are children under that, we should release them
+ * all
+ */
+ release_child_resources(r);
+ if (!release_resource(r)) {
+ type = old_flags = r->flags & type_mask;
+ dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
+ PCI_BRIDGE_RESOURCES + idx, r);
+ /* keep the old size */
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ r->flags = 0;
- if (changed) {
/* avoid touching the window without PREF */
if (type & IORESOURCE_PREFETCH)
type = IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
+ /* for next child res under same bridge */
+ r->flags = old_flags;
}
}
@@ -1304,9 +1391,9 @@ enum release_type {
* try to release pci bridge resources that is from leaf bridge,
* so we can allocate big new one later
*/
-static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
- unsigned long type,
- enum release_type rel_type)
+static void pci_bus_release_bridge_resources(struct pci_bus *bus,
+ unsigned long type,
+ enum release_type rel_type)
{
struct pci_dev *dev;
bool is_leaf_bridge = true;
@@ -1471,7 +1558,7 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -1629,9 +1716,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list)
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- if (dev->subordinate)
+ if (pci_is_bridge(dev) && pci_has_subordinate(dev))
__pci_bus_size_bridges(dev->subordinate,
&add_list);
up_read(&pci_bus_sem);
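
For reference, the helpers used above replace the open-coded hdr_type and subordinate checks. A sketch of their expected definitions (they are introduced elsewhere in this series, in include/linux/pci.h; quoted here as an assumption):

static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
	       dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
	return !!(pci_dev->subordinate);
}
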
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 9bd6864ec5d3..dbc4ffcf42de 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -10,7 +10,6 @@
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7eed671d5586..33f9e32d94d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -16,7 +16,6 @@
* Resource sorting
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
@@ -209,21 +208,42 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
- /* First, try exact prefetching match.. */
+ /*
+ * First, try exact prefetching match. Even if a 64-bit
+ * prefetchable bridge window is below 4GB, we can't put a 32-bit
+ * prefetchable resource in it because pbus_size_mem() assumes a
+ * 64-bit window will contain no 32-bit resources. If we assign
+ * things differently than they were sized, not everything will fit.
+ */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
- IORESOURCE_PREFETCH,
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
- if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) {
- /*
- * That failed.
- *
- * But a prefetching area can handle a non-prefetching
- * window (it will just not perform as well).
- */
- ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+ /*
+ * If the prefetchable window is only 32 bits wide, we can put
+ * 64-bit prefetchable resources in it.
+ */
+ if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
+ (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH,
pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
}
+
+ /*
+ * If we didn't find a better match, we can put any memory resource
+ * in a non-prefetchable window. If this resource is 32 bits and
+ * non-prefetchable, the first call already tried the only possibility
+ * so we don't need to try again.
+ */
+ if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
+ ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+ pcibios_align_resource, dev);
+
return ret;
}
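
The rewritten function walks a three-step fallback ladder. A compressed sketch of that ladder (try() stands in for pci_bus_alloc_resource() with the given type mask; this is an illustration, not the kernel function):

static int assign_fallback(unsigned long res_flags,
			   int (*try)(unsigned long type_mask))
{
	int ret;

	/* 1) exact match: try the 64-bit prefetchable window first */
	ret = try(IORESOURCE_PREFETCH | IORESOURCE_MEM_64);
	if (ret == 0)
		return 0;

	/* 2) a 64-bit pref resource may use a 32-bit pref window */
	if ((res_flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
	    (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
		ret = try(IORESOURCE_PREFETCH);
		if (ret == 0)
			return 0;
	}

	/* 3) any pref or 64-bit resource may use a non-pref window;
	 * a 32-bit non-pref resource already tried its only option */
	if (res_flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
		ret = try(0);

	return ret;
}
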
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 8bde61952d20..4fe4cc4ae19a 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -78,8 +78,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list)
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev, max, pass);
/*
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIOF_DIR_IN;
}
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
{
struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
wmt_clearbits(data, reg_data_out, BIT(bit));
}
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ wmt_gpio_set_value(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
static struct gpio_chip wmt_gpio_chip = {
.label = "gpio-wmt",
.owner = THIS_MODULE,
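
The direction handlers are moved below wmt_gpio_set_value() so that wmt_gpio_direction_output() can drive the requested level before flipping the pin to output, instead of briefly exposing the stale level. From a consumer's point of view (an illustrative legacy-gpiolib call, not part of the patch):

/* the value is latched first, then the pin switches to output */
gpio_direction_output(gpio, 1);
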
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c5e082fb82fa..91ef69a52263 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -642,8 +642,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
dev = pci_scan_single_device(bus, 0);
if (dev) {
pci_bus_assign_resources(bus);
- if (pci_bus_add_device(dev))
- pr_err("Unable to hotplug wifi\n");
+ pci_bus_add_device(dev);
}
} else {
dev = pci_get_slot(bus, 0);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 399e8c562192..9b0c57cd1d4a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -633,8 +633,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
dev = pci_scan_single_device(bus, 0);
if (dev) {
pci_bus_assign_resources(bus);
- if (pci_bus_add_device(dev))
- pr_err("Unable to hotplug wifi\n");
+ pci_bus_add_device(dev);
}
} else {
dev = pci_get_slot(bus, 0);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index fa0e4e057b99..bdcf5173e377 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -12,6 +12,14 @@ config POWER_RESET_AS3722
help
This driver supports turning off the board via an ams AS3722 power-off.
+config POWER_RESET_AXXIA
+ bool "LSI Axxia reset driver"
+ depends on POWER_RESET && ARCH_AXXIA
+ help
+ This driver supports restart for Axxia family SoCs.
+
+ Say Y if you have an Axxia family SoC.
+
config POWER_RESET_GPIO
bool "GPIO power-off driver"
depends on OF_GPIO && POWER_RESET
@@ -43,6 +51,13 @@ config POWER_RESET_RESTART
Instead they restart, and u-boot holds the SoC until the
user presses a key. u-boot then boots into Linux.
+config POWER_RESET_SUN6I
+ bool "Allwinner A31 SoC reset driver"
+ depends on ARCH_SUNXI
+ depends on POWER_RESET
+ help
+ Reboot support for the Allwinner A31 SoCs.
+
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
@@ -57,3 +72,11 @@ config POWER_RESET_XGENE
depends on POWER_RESET
help
Reboot support for the APM SoC X-Gene Eval boards.
+
+config POWER_RESET_KEYSTONE
+ bool "Keystone reset driver"
+ depends on ARCH_KEYSTONE
+ select MFD_SYSCON
+ help
+ Reboot support for the Keystone SoCs.
+
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index a5b4a77d1a41..dde2e8bbac53 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,7 +1,10 @@
obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
+obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
+obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
+obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c
new file mode 100644
index 000000000000..3b1f8d601784
--- /dev/null
+++ b/drivers/power/reset/axxia-reset.c
@@ -0,0 +1,88 @@
+/*
+ * Reset driver for Axxia devices
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+#include <asm/system_misc.h>
+
+
+#define SC_CRIT_WRITE_KEY 0x1000
+#define SC_LATCH_ON_RESET 0x1004
+#define SC_RESET_CONTROL 0x1008
+#define RSTCTL_RST_ZERO (1<<3)
+#define RSTCTL_RST_FAB (1<<2)
+#define RSTCTL_RST_CHIP (1<<1)
+#define RSTCTL_RST_SYS (1<<0)
+#define SC_EFUSE_INT_STATUS 0x180c
+#define EFUSE_READ_DONE (1<<31)
+
+static struct regmap *syscon;
+
+static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ /* Access Key (0xab) */
+ regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab);
+ /* Select internal boot from 0xffff0000 */
+ regmap_write(syscon, SC_LATCH_ON_RESET, 0x00000040);
+ /* Assert ResetReadDone (to avoid hanging in boot ROM) */
+ regmap_write(syscon, SC_EFUSE_INT_STATUS, EFUSE_READ_DONE);
+ /* Assert chip reset */
+ regmap_update_bits(syscon, SC_RESET_CONTROL,
+ RSTCTL_RST_CHIP, RSTCTL_RST_CHIP);
+}
+
+static int axxia_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ pr_err("%s: syscon lookup failed\n", dev->of_node->name);
+ return PTR_ERR(syscon);
+ }
+
+ arm_pm_restart = do_axxia_restart;
+
+ return 0;
+}
+
+static const struct of_device_id of_axxia_reset_match[] = {
+ { .compatible = "lsi,axm55xx-reset", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_axxia_reset_match);
+
+static struct platform_driver axxia_reset_driver = {
+ .probe = axxia_reset_probe,
+ .driver = {
+ .name = "axxia-reset",
+ .of_match_table = of_match_ptr(of_axxia_reset_match),
+ },
+};
+
+static int __init axxia_reset_init(void)
+{
+ return platform_driver_register(&axxia_reset_driver);
+}
+device_initcall(axxia_reset_init);
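
With arm_pm_restart set, the ARM reboot path hands control to do_axxia_restart(). A rough sketch of that hand-off, assuming the arch/arm conventions of this kernel era (not code from this patch):

/* arch/arm/kernel/process.c, roughly: */
void machine_restart(char *cmd)
{
	local_irq_disable();
	smp_send_stop();

	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);	/* -> do_axxia_restart() */
}
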
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
new file mode 100644
index 000000000000..408a18fd91cb
--- /dev/null
+++ b/drivers/power/reset/keystone-reset.c
@@ -0,0 +1,166 @@
+/*
+ * TI Keystone reboot driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <asm/system_misc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+
+#define RSTYPE_RG 0x0
+#define RSCTRL_RG 0x4
+#define RSCFG_RG 0x8
+#define RSISO_RG 0xc
+
+#define RSCTRL_KEY_MASK 0x0000ffff
+#define RSCTRL_RESET_MASK BIT(16)
+#define RSCTRL_KEY 0x5a69
+
+#define RSMUX_OMODE_MASK 0xe
+#define RSMUX_OMODE_RESET_ON 0xa
+#define RSMUX_OMODE_RESET_OFF 0x0
+#define RSMUX_LOCK_MASK 0x1
+#define RSMUX_LOCK_SET 0x1
+
+#define RSCFG_RSTYPE_SOFT 0x300f
+#define RSCFG_RSTYPE_HARD 0x0
+
+#define WDT_MUX_NUMBER 0x4
+
+static int rspll_offset;
+static struct regmap *pllctrl_regs;
+
+/**
+ * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG
+ * The RSCTRL and RSCFG registers can only be modified after
+ * the access key has been written to RSCTRL.
+ */
+static inline int rsctrl_enable_rspll_write(void)
+{
+ return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
+ RSCTRL_KEY_MASK, RSCTRL_KEY);
+}
+
+static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
+{
+ /* enable write access to RSCTRL */
+ rsctrl_enable_rspll_write();
+
+ /* reset the SOC */
+ regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
+ RSCTRL_RESET_MASK, 0);
+}
+
+static struct of_device_id rsctrl_of_match[] = {
+ {.compatible = "ti,keystone-reset", },
+ {},
+};
+
+static int rsctrl_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ u32 val;
+ unsigned int rg;
+ u32 rsmux_offset;
+ struct regmap *devctrl_regs;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ /* get regmaps */
+ pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
+ if (IS_ERR(pllctrl_regs))
+ return PTR_ERR(pllctrl_regs);
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
+ if (ret) {
+ dev_err(dev, "couldn't read the reset pll offset!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
+ if (ret) {
+ dev_err(dev, "couldn't read the rsmux offset!\n");
+ return -EINVAL;
+ }
+
+ /* set soft/hard reset */
+ val = of_property_read_bool(np, "ti,soft-reset");
+ val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;
+
+ ret = rsctrl_enable_rspll_write();
+ if (ret)
+ return ret;
+
+ ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val);
+ if (ret)
+ return ret;
+
+ arm_pm_restart = rsctrl_restart;
+
+ /* disable reset isolation for all module clocks */
+ ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
+ if (ret)
+ return ret;
+
+ /* enable reset for the watchdogs listed in ti,wdt-list */
+ for (i = 0; i < WDT_MUX_NUMBER; i++) {
+ ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val);
+ if (ret == -EOVERFLOW && !i) {
+ dev_err(dev, "ti,wdt-list property has to contain at"
+ "least one entry\n");
+ return -EINVAL;
+ } else if (ret) {
+ break;
+ }
+
+ if (val >= WDT_MUX_NUMBER) {
+ dev_err(dev, "ti,wdt-list property can contain"
+ "only numbers < 4\n");
+ return -EINVAL;
+ }
+
+ rg = rsmux_offset + val * 4;
+
+ ret = regmap_update_bits(devctrl_regs, rg, RSMUX_OMODE_MASK,
+ RSMUX_OMODE_RESET_ON |
+ RSMUX_LOCK_SET);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rsctrl_driver = {
+ .probe = rsctrl_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .of_match_table = rsctrl_of_match,
+ },
+};
+module_platform_driver(rsctrl_driver);
+
+MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments keystone reset driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c
new file mode 100644
index 000000000000..af2cd7ff2fe8
--- /dev/null
+++ b/drivers/power/reset/sun6i-reboot.c
@@ -0,0 +1,85 @@
+/*
+ * Allwinner A31 SoCs reset code
+ *
+ * Copyright (C) 2012-2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+#define SUN6I_WATCHDOG1_IRQ_REG 0x00
+#define SUN6I_WATCHDOG1_CTRL_REG 0x10
+#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_REG 0x14
+#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1)
+#define SUN6I_WATCHDOG1_MODE_REG 0x18
+#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0)
+
+static void __iomem *wdt_base;
+
+static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd)
+{
+ if (!wdt_base)
+ return;
+
+ /* Disable interrupts */
+ writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG);
+
+ /* We want to disable the IRQ and just reset the whole system */
+ writel(SUN6I_WATCHDOG1_CONFIG_RESTART,
+ wdt_base + SUN6I_WATCHDOG1_CONFIG_REG);
+
+ /* Enable timer. The default and lowest interval value is 0.5s */
+ writel(SUN6I_WATCHDOG1_MODE_ENABLE,
+ wdt_base + SUN6I_WATCHDOG1_MODE_REG);
+
+ /* Restart the watchdog. */
+ writel(SUN6I_WATCHDOG1_CTRL_RESTART,
+ wdt_base + SUN6I_WATCHDOG1_CTRL_REG);
+
+ while (1) {
+ mdelay(5);
+ writel(SUN6I_WATCHDOG1_MODE_ENABLE,
+ wdt_base + SUN6I_WATCHDOG1_MODE_REG);
+ }
+}
+
+static int sun6i_reboot_probe(struct platform_device *pdev)
+{
+ wdt_base = of_iomap(pdev->dev.of_node, 0);
+ if (!wdt_base) {
+ WARN(1, "failed to map watchdog base address");
+ return -ENODEV;
+ }
+
+ arm_pm_restart = sun6i_wdt_restart;
+
+ return 0;
+}
+
+static struct of_device_id sun6i_reboot_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-wdt" },
+ {}
+};
+
+static struct platform_driver sun6i_reboot_driver = {
+ .probe = sun6i_reboot_probe,
+ .driver = {
+ .name = "sun6i-reboot",
+ .of_match_table = sun6i_reboot_of_match,
+ },
+};
+module_platform_driver(sun6i_reboot_driver);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index b95cf71ed695..4dc102e2b230 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -23,10 +23,10 @@
static void vexpress_reset_do(struct device *dev, const char *what)
{
int err = -ENOENT;
- struct vexpress_config_func *func = dev_get_drvdata(dev);
+ struct regmap *reg = dev_get_drvdata(dev);
- if (func) {
- err = vexpress_config_write(func, 0, 0);
+ if (reg) {
+ err = regmap_write(reg, 0, 0);
if (!err)
mdelay(1000);
}
@@ -91,17 +91,17 @@ static int vexpress_reset_probe(struct platform_device *pdev)
enum vexpress_reset_func func;
const struct of_device_id *match =
of_match_device(vexpress_reset_of_match, &pdev->dev);
- struct vexpress_config_func *config_func;
+ struct regmap *regmap;
if (match)
func = (enum vexpress_reset_func)match->data;
else
func = pdev->id_entry->driver_data;
- config_func = vexpress_config_func_get_by_dev(&pdev->dev);
- if (!config_func)
- return -EINVAL;
- dev_set_drvdata(&pdev->dev, config_func);
+ regmap = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ dev_set_drvdata(&pdev->dev, regmap);
switch (func) {
case FUNC_SHUTDOWN:
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
+ depends on NET
select PPS
select NET_PTP_CLASSIFY
help
@@ -74,7 +75,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86 || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && NET
select PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 903eb37f047a..f0cc9e6dac3a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -266,11 +266,12 @@ config REGULATOR_LP8788
This driver supports LP8788 voltage regulator chip.
config REGULATOR_MAX14577
- tristate "Maxim 14577 regulator"
+ tristate "Maxim 14577/77836 regulator"
depends on MFD_MAX14577
help
- This driver controls a Maxim 14577 regulator via I2C bus.
- The regulators include safeout LDO and current regulator 'CHARGER'.
+ This driver controls a Maxim MAX14577/77836 regulator via I2C bus.
+ The MAX14577 regulators include safeout LDO and charger current
+ regulator. The MAX77836 has two additional LDOs.
config REGULATOR_MAX1586
tristate "Maxim 1586/1587 voltage regulator"
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index ed60baaeceec..5d9c605cf534 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -1,5 +1,5 @@
/*
- * max14577.c - Regulator driver for the Maxim 14577
+ * max14577.c - Regulator driver for the Maxim 14577/77836
*
* Copyright (C) 2013,2014 Samsung Electronics
* Krzysztof Kozlowski <k.kozlowski@samsung.com>
@@ -22,6 +22,42 @@
#include <linux/mfd/max14577-private.h>
#include <linux/regulator/of_regulator.h>
+/*
+ * Valid limits of current for max14577 and max77836 chargers.
+ * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4
+ * register for given chipset.
+ */
+struct maxim_charger_current {
+ /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */
+ unsigned int min;
+ /*
+ * Minimal current when high setting is active,
+ * set in CHGCTRL4/MBCICHWRCH, uA
+ */
+ unsigned int high_start;
+ /* Value of one step in high setting, uA */
+ unsigned int high_step;
+ /* Maximum current of high setting, uA */
+ unsigned int max;
+};
+
+/* Table of valid charger currents for different Maxim chipsets */
+static const struct maxim_charger_current maxim_charger_currents[] = {
+ [MAXIM_DEVICE_TYPE_UNKNOWN] = { 0, 0, 0, 0 },
+ [MAXIM_DEVICE_TYPE_MAX14577] = {
+ .min = MAX14577_REGULATOR_CURRENT_LIMIT_MIN,
+ .high_start = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START,
+ .high_step = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP,
+ .max = MAX14577_REGULATOR_CURRENT_LIMIT_MAX,
+ },
+ [MAXIM_DEVICE_TYPE_MAX77836] = {
+ .min = MAX77836_REGULATOR_CURRENT_LIMIT_MIN,
+ .high_start = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START,
+ .high_step = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP,
+ .max = MAX77836_REGULATOR_CURRENT_LIMIT_MAX,
+ },
+};
+
static int max14577_reg_is_enabled(struct regulator_dev *rdev)
{
int rid = rdev_get_id(rdev);
@@ -47,6 +83,9 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
{
u8 reg_data;
struct regmap *rmap = rdev->regmap;
+ struct max14577 *max14577 = rdev_get_drvdata(rdev);
+ const struct maxim_charger_current *limits =
+ &maxim_charger_currents[max14577->dev_type];
if (rdev_get_id(rdev) != MAX14577_CHARGER)
return -EINVAL;
@@ -54,12 +93,11 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
- return MAX14577_REGULATOR_CURRENT_LIMIT_MIN;
+ return limits->min;
reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >>
CHGCTRL4_MBCICHWRCH_SHIFT);
- return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
- reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP;
+ return limits->high_start + reg_data * limits->high_step;
}
static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
@@ -67,33 +105,39 @@ static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
{
int i, current_bits = 0xf;
u8 reg_data;
+ struct max14577 *max14577 = rdev_get_drvdata(rdev);
+ const struct maxim_charger_current *limits =
+ &maxim_charger_currents[max14577->dev_type];
if (rdev_get_id(rdev) != MAX14577_CHARGER)
return -EINVAL;
- if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX ||
- max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN)
+ if (min_uA > limits->max || max_uA < limits->min)
return -EINVAL;
- if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) {
- /* Less than 200 mA, so set 90mA (turn only Low Bit off) */
+ if (max_uA < limits->high_start) {
+ /*
+ * Less than high_start,
+ * so set the minimal current (turn only Low Bit off)
+ */
u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT;
return max14577_update_reg(rdev->regmap,
MAX14577_CHG_REG_CHG_CTRL4,
CHGCTRL4_MBCICHWRCL_MASK, reg_data);
}
- /* max_uA is in range: <LIMIT_HIGH_START, inifinite>, so search for
- * valid current starting from LIMIT_MAX. */
- for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX;
- i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START;
- i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) {
+ /*
+ * max_uA is in range <high_start, infinite>, so search for
+ * valid current starting from maximum current.
+ */
+ for (i = limits->max; i >= limits->high_start; i -= limits->high_step) {
if (i <= max_uA)
break;
current_bits--;
}
BUG_ON(current_bits < 0); /* Cannot happen */
- /* Turn Low Bit on (use range 200mA-950 mA) */
+
+ /* Turn Low Bit on (use range high_start-max)... */
reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT;
/* and set proper High Bits */
reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT;
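
To make the search concrete, here is the same loop with the MAX14577 numbers plugged in (90 mA minimum, 200 mA high_start, 50 mA step, 950 mA maximum; these values come from the driver's limit macros and the old comments removed above). A standalone sketch, not driver code:

static unsigned int charger_high_bits(unsigned int max_uA)
{
	unsigned int bits = 0xf;
	unsigned int i;

	/* e.g. max_uA = 500000: 950000, 900000, ..., 500000 -> bits = 0x6 */
	for (i = 950000; i >= 200000; i -= 50000) {
		if (i <= max_uA)
			break;
		bits--;
	}
	return bits;	/* written to the CHGCTRL4_MBCICHWRCH field */
}
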
@@ -118,7 +162,7 @@ static struct regulator_ops max14577_charger_ops = {
.set_current_limit = max14577_reg_set_current_limit,
};
-static const struct regulator_desc supported_regulators[] = {
+static const struct regulator_desc max14577_supported_regulators[] = {
[MAX14577_SAFEOUT] = {
.name = "SAFEOUT",
.id = MAX14577_SAFEOUT,
@@ -141,16 +185,88 @@ static const struct regulator_desc supported_regulators[] = {
},
};
+static struct regulator_ops max77836_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ /* TODO: add .set_suspend_mode */
+};
+
+static const struct regulator_desc max77836_supported_regulators[] = {
+ [MAX14577_SAFEOUT] = {
+ .name = "SAFEOUT",
+ .id = MAX14577_SAFEOUT,
+ .ops = &max14577_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE,
+ .enable_reg = MAX14577_REG_CONTROL2,
+ .enable_mask = CTRL2_SFOUTORD_MASK,
+ },
+ [MAX14577_CHARGER] = {
+ .name = "CHARGER",
+ .id = MAX14577_CHARGER,
+ .ops = &max14577_charger_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .enable_reg = MAX14577_CHG_REG_CHG_CTRL2,
+ .enable_mask = CHGCTRL2_MBCHOSTEN_MASK,
+ },
+ [MAX77836_LDO1] = {
+ .name = "LDO1",
+ .id = MAX77836_LDO1,
+ .ops = &max77836_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM,
+ .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN,
+ .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP,
+ .enable_reg = MAX77836_LDO_REG_CNFG1_LDO1,
+ .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK,
+ .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO1,
+ .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK,
+ },
+ [MAX77836_LDO2] = {
+ .name = "LDO2",
+ .id = MAX77836_LDO2,
+ .ops = &max77836_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM,
+ .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN,
+ .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP,
+ .enable_reg = MAX77836_LDO_REG_CNFG1_LDO2,
+ .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK,
+ .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO2,
+ .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK,
+ },
+};
+
#ifdef CONFIG_OF
static struct of_regulator_match max14577_regulator_matches[] = {
{ .name = "SAFEOUT", },
{ .name = "CHARGER", },
};
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+static struct of_regulator_match max77836_regulator_matches[] = {
+ { .name = "SAFEOUT", },
+ { .name = "CHARGER", },
+ { .name = "LDO1", },
+ { .name = "LDO2", },
+};
+
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
+ enum maxim_device_type dev_type)
{
int ret;
struct device_node *np;
+ struct of_regulator_match *regulator_matches;
+ unsigned int regulator_matches_size;
np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!np) {
@@ -158,8 +274,19 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
return -EINVAL;
}
- ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
- MAX14577_REG_MAX);
+ switch (dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ regulator_matches = max77836_regulator_matches;
+ regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
+ break;
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ regulator_matches = max14577_regulator_matches;
+ regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
+ }
+
+ ret = of_regulator_match(&pdev->dev, np, regulator_matches,
+ regulator_matches_size);
if (ret < 0)
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
else
@@ -170,31 +297,74 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
return ret;
}
-static inline struct regulator_init_data *match_init_data(int index)
+static inline struct regulator_init_data *match_init_data(int index,
+ enum maxim_device_type dev_type)
{
- return max14577_regulator_matches[index].init_data;
+ switch (dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ return max77836_regulator_matches[index].init_data;
+
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ return max14577_regulator_matches[index].init_data;
+ }
}
-static inline struct device_node *match_of_node(int index)
+static inline struct device_node *match_of_node(int index,
+ enum maxim_device_type dev_type)
{
- return max14577_regulator_matches[index].of_node;
+ switch (dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ return max77836_regulator_matches[index].of_node;
+
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ return max14577_regulator_matches[index].of_node;
+ }
}
#else /* CONFIG_OF */
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
+ enum maxim_device_type dev_type)
{
return 0;
}
-static inline struct regulator_init_data *match_init_data(int index)
+static inline struct regulator_init_data *match_init_data(int index,
+ enum maxim_device_type dev_type)
{
return NULL;
}
-static inline struct device_node *match_of_node(int index)
+static inline struct device_node *match_of_node(int index,
+ enum maxim_device_type dev_type)
{
return NULL;
}
#endif /* CONFIG_OF */
+/**
+ * The regulators of the max77836 live behind different I2C slave
+ * addresses, so different regmaps must be used for them.
+ *
+ * Returns the proper regmap for accessing the regulator with the given id.
+ */
+static struct regmap *max14577_get_regmap(struct max14577 *max14577,
+ int reg_id)
+{
+ switch (max14577->dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ switch (reg_id) {
+ case MAX77836_SAFEOUT ... MAX77836_CHARGER:
+ return max14577->regmap;
+ default:
+ /* MAX77836_LDO1 ... MAX77836_LDO2 */
+ return max14577->regmap_pmic;
+ }
+
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ return max14577->regmap;
+ }
+}
static int max14577_regulator_probe(struct platform_device *pdev)
{
@@ -202,15 +372,29 @@ static int max14577_regulator_probe(struct platform_device *pdev)
struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
int i, ret;
struct regulator_config config = {};
+ const struct regulator_desc *supported_regulators;
+ unsigned int supported_regulators_size;
+ enum maxim_device_type dev_type = max14577->dev_type;
- ret = max14577_regulator_dt_parse_pdata(pdev);
+ ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
if (ret)
return ret;
+ switch (dev_type) {
+ case MAXIM_DEVICE_TYPE_MAX77836:
+ supported_regulators = max77836_supported_regulators;
+ supported_regulators_size = ARRAY_SIZE(max77836_supported_regulators);
+ break;
+ case MAXIM_DEVICE_TYPE_MAX14577:
+ default:
+ supported_regulators = max14577_supported_regulators;
+ supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
+ }
+
config.dev = &pdev->dev;
- config.regmap = max14577->regmap;
+ config.driver_data = max14577;
- for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+ for (i = 0; i < supported_regulators_size; i++) {
struct regulator_dev *regulator;
/*
* Index of supported_regulators[] is also the id and must
@@ -220,17 +404,19 @@ static int max14577_regulator_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
} else {
- config.init_data = match_init_data(i);
- config.of_node = match_of_node(i);
+ config.init_data = match_init_data(i, dev_type);
+ config.of_node = match_of_node(i, dev_type);
}
+ config.regmap = max14577_get_regmap(max14577,
+ supported_regulators[i].id);
regulator = devm_regulator_register(&pdev->dev,
&supported_regulators[i], &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
dev_err(&pdev->dev,
- "Regulator init failed for ID %d with error: %d\n",
- i, ret);
+ "Regulator init failed for %d/%s with error: %d\n",
+ i, supported_regulators[i].name, ret);
return ret;
}
}
@@ -238,20 +424,41 @@ static int max14577_regulator_probe(struct platform_device *pdev)
return ret;
}
+static const struct platform_device_id max14577_regulator_id[] = {
+ { "max14577-regulator", MAXIM_DEVICE_TYPE_MAX14577, },
+ { "max77836-regulator", MAXIM_DEVICE_TYPE_MAX77836, },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max14577_regulator_id);
+
static struct platform_driver max14577_regulator_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "max14577-regulator",
},
- .probe = max14577_regulator_probe,
+ .probe = max14577_regulator_probe,
+ .id_table = max14577_regulator_id,
};
static int __init max14577_regulator_init(void)
{
+ /* Check for valid values for charger */
BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf !=
MAX14577_REGULATOR_CURRENT_LIMIT_MAX);
- BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX);
+ BUILD_BUG_ON(MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START +
+ MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf !=
+ MAX77836_REGULATOR_CURRENT_LIMIT_MAX);
+ /* Valid charger current values must be provided for each chipset */
+ BUILD_BUG_ON(ARRAY_SIZE(maxim_charger_currents) != MAXIM_DEVICE_TYPE_NUM);
+
+ BUILD_BUG_ON(ARRAY_SIZE(max14577_supported_regulators) != MAX14577_REGULATOR_NUM);
+ BUILD_BUG_ON(ARRAY_SIZE(max77836_supported_regulators) != MAX77836_REGULATOR_NUM);
+
+ BUILD_BUG_ON(MAX77836_REGULATOR_LDO_VOLTAGE_MIN +
+ (MAX77836_REGULATOR_LDO_VOLTAGE_STEP *
+ (MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM - 1)) !=
+ MAX77836_REGULATOR_LDO_VOLTAGE_MAX);
return platform_driver_register(&max14577_regulator_driver);
}
@@ -264,6 +471,6 @@ static void __exit max14577_regulator_exit(void)
module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
-MODULE_DESCRIPTION("MAXIM 14577 regulator driver");
+MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index f3ae28a7e663..2863428813e4 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -26,14 +26,14 @@
struct vexpress_regulator {
struct regulator_desc desc;
struct regulator_dev *regdev;
- struct vexpress_config_func *func;
+ struct regmap *regmap;
};
static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
{
struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
u32 uV;
- int err = vexpress_config_read(reg->func, 0, &uV);
+ int err = regmap_read(reg->regmap, 0, &uV);
return err ? err : uV;
}
@@ -43,7 +43,7 @@ static int vexpress_regulator_set_voltage(struct regulator_dev *regdev,
{
struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
- return vexpress_config_write(reg->func, 0, min_uV);
+ return regmap_write(reg->regmap, 0, min_uV);
}
static struct regulator_ops vexpress_regulator_ops_ro = {
@@ -57,22 +57,17 @@ static struct regulator_ops vexpress_regulator_ops = {
static int vexpress_regulator_probe(struct platform_device *pdev)
{
- int err;
struct vexpress_regulator *reg;
struct regulator_init_data *init_data;
struct regulator_config config = { };
reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL);
- if (!reg) {
- err = -ENOMEM;
- goto error_kzalloc;
- }
+ if (!reg)
+ return -ENOMEM;
- reg->func = vexpress_config_func_get_by_dev(&pdev->dev);
- if (!reg->func) {
- err = -ENXIO;
- goto error_get_func;
- }
+ reg->regmap = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(reg->regmap))
+ return PTR_ERR(reg->regmap);
reg->desc.name = dev_name(&pdev->dev);
reg->desc.type = REGULATOR_VOLTAGE;
@@ -80,10 +75,8 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
reg->desc.continuous_voltage_range = true;
init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
- if (!init_data) {
- err = -EINVAL;
- goto error_get_regulator_init_data;
- }
+ if (!init_data)
+ return -EINVAL;
init_data->constraints.apply_uV = 0;
if (init_data->constraints.min_uV && init_data->constraints.max_uV)
@@ -97,30 +90,12 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
config.of_node = pdev->dev.of_node;
reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config);
- if (IS_ERR(reg->regdev)) {
- err = PTR_ERR(reg->regdev);
- goto error_regulator_register;
- }
+ if (IS_ERR(reg->regdev))
+ return PTR_ERR(reg->regdev);
platform_set_drvdata(pdev, reg);
return 0;
-
-error_regulator_register:
-error_get_regulator_init_data:
- vexpress_config_func_put(reg->func);
-error_get_func:
-error_kzalloc:
- return err;
-}
-
-static int vexpress_regulator_remove(struct platform_device *pdev)
-{
- struct vexpress_regulator *reg = platform_get_drvdata(pdev);
-
- vexpress_config_func_put(reg->func);
-
- return 0;
}
static struct of_device_id vexpress_regulator_of_match[] = {
@@ -130,7 +105,6 @@ static struct of_device_id vexpress_regulator_of_match[] = {
static struct platform_driver vexpress_regulator_driver = {
.probe = vexpress_regulator_probe,
- .remove = vexpress_regulator_remove,
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 4f60caf750ce..60fed3d7820b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..79c32ca84ef1
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * based on
+ * Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define NR_BANKS 4
+#define OFFSET_MODRST 0x10
+
+struct socfpga_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct socfpga_reset_data *data = container_of(rcdev,
+ struct socfpga_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + OFFSET_MODRST +
+ (bank * NR_BANKS));
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct socfpga_reset_data *data = container_of(rcdev,
+ struct socfpga_reset_data,
+ rcdev);
+
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST +
+ (bank * NR_BANKS));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops socfpga_reset_ops = {
+ .assert = socfpga_reset_assert,
+ .deassert = socfpga_reset_deassert,
+};
+
+static int socfpga_reset_probe(struct platform_device *pdev)
+{
+ struct socfpga_reset_data *data;
+ struct resource *res;
+
+ /*
+ * The binding was mainlined without the required property.
+ * Do not continue when we encounter an old DT.
+ */
+ if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ dev_err(&pdev->dev, "%s missing #reset-cells property\n",
+ pdev->dev.of_node->full_name);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+ data->rcdev.ops = &socfpga_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+ reset_controller_register(&data->rcdev);
+
+ return 0;
+}
+
+static int socfpga_reset_remove(struct platform_device *pdev)
+{
+ struct socfpga_reset_data *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver socfpga_reset_driver = {
+ .probe = socfpga_reset_probe,
+ .remove = socfpga_reset_remove,
+ .driver = {
+ .name = "socfpga-reset",
+ .owner = THIS_MODULE,
+ .of_match_table = socfpga_reset_dt_ids,
+ },
+};
+module_platform_driver(socfpga_reset_driver);
+
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
+MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 695bd3496eba..a94e7a7820b4 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -145,7 +145,24 @@ MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
- return sunxi_reset_init(pdev->dev.of_node);
+ struct sunxi_reset_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * 32;
+ data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return reset_controller_register(&data->rcdev);
}
static int sunxi_reset_remove(struct platform_device *pdev)
@@ -153,8 +170,6 @@ static int sunxi_reset_remove(struct platform_device *pdev)
struct sunxi_reset_data *data = platform_get_drvdata(pdev);
reset_controller_unregister(&data->rcdev);
- iounmap(data->membase);
- kfree(data);
return 0;
}
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
index e6f6c41abe12..c93fd260447e 100644
--- a/drivers/reset/sti/reset-stih415.c
+++ b/drivers/reset/sti/reset-stih415.c
@@ -73,6 +73,7 @@ static const struct syscfg_reset_channel_data stih415_softresets[] = {
[STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
[STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
[STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
+ [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
};
static struct syscfg_reset_controller_data stih415_powerdown_controller = {
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
index fe3bf02bdc8c..5fc987076a90 100644
--- a/drivers/reset/sti/reset-stih416.c
+++ b/drivers/reset/sti/reset-stih416.c
@@ -104,6 +104,7 @@ static const struct syscfg_reset_channel_data stih416_softresets[] = {
[STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
[STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
[STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
+ [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
};
static struct syscfg_reset_controller_data stih416_powerdown_controller = {
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 4ccb5d869389..a40ee1e37486 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -207,7 +207,7 @@ static void jsfd_do_request(struct request_queue *q)
goto end;
}
- jsfd_read(req->buffer, jdp->dbase + offset, len);
+ jsfd_read(bio_data(req->bio), jdp->dbase + offset, len);
err = 0;
end:
if (!__blk_end_request_cur(req, err))
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 296c936cc03c..a8d721ff19eb 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -639,7 +639,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host)
"double buffer\n");
return 0;
}
- atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer);
+ atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
atari_dma_orig_addr = 0;
}
#endif
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9db097a28a74..a0c95cac91f0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -140,7 +140,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
cmd->result = 0;
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
- kblockd_schedule_work(q, &device->requeue_work);
+ kblockd_schedule_work(&device->requeue_work);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -1019,8 +1019,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
return BLKPREP_DEFER;
}
- req->buffer = NULL;
-
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
@@ -1158,7 +1156,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
- req->buffer = NULL;
}
cmd->cmd_len = req->cmd_len;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list);
mutex_unlock(&sas_host->lock);
- sas_bsg_remove(shost, rphy);
-
transport_destroy_device(dev);
put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
}
sas_rphy_unlink(rphy);
+ sas_bsg_remove(NULL, rphy);
transport_remove_device(dev);
device_del(dev);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index efcbcd182863..96af195224f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -737,16 +737,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
goto out;
}
+ rq->completion_data = page;
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
- rq->buffer = page_address(page);
rq->__data_len = nr_bytes;
out:
- if (ret != BLKPREP_OK) {
+ if (ret != BLKPREP_OK)
__free_page(page);
- rq->buffer = NULL;
- }
return ret;
}
@@ -842,10 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{
struct scsi_cmnd *SCpnt = rq->special;
- if (rq->cmd_flags & REQ_DISCARD) {
- free_page((unsigned long)rq->buffer);
- rq->buffer = NULL;
- }
+ if (rq->cmd_flags & REQ_DISCARD)
+ __free_page(rq->completion_data);
+
if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL;
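
The net effect of the sd.c hunks above is to move the discard payload page out of rq->buffer (which this series is removing) into rq->completion_data, keeping allocation and free paired. A summary sketch, not driver code:

/* sd_setup_discard_cmnd(): */
rq->completion_data = page;		/* zeroed payload page */
/* ... request runs to completion ... */
/* sd_unprep_fn(), for REQ_DISCARD requests: */
__free_page(rq->completion_data);
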
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
new file mode 100644
index 000000000000..c8543855aa82
--- /dev/null
+++ b/drivers/soc/Kconfig
@@ -0,0 +1,5 @@
+menu "SOC (System On Chip) specific Drivers"
+
+source "drivers/soc/qcom/Kconfig"
+
+endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
new file mode 100644
index 000000000000..0f7c44793b29
--- /dev/null
+++ b/drivers/soc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Linux Kernel SOC specific device drivers.
+#
+
+obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 000000000000..7bd2c94f54a4
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,11 @@
+#
+# QCOM SoC drivers
+#
+config QCOM_GSBI
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
+
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 000000000000..438901257ac1
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 000000000000..447458e696a9
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define GSBI_CTRL_REG 0x0000
+#define GSBI_PROTOCOL_SHIFT 4
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *hclk;
+ u32 mode, crci = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_property_read_u32(node, "qcom,mode", &mode)) {
+ dev_err(&pdev->dev, "missing mode configuration\n");
+ return -EINVAL;
+ }
+
+ /* not required, so default to 0 if not present */
+ of_property_read_u32(node, "qcom,crci", &crci);
+
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
+
+ hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(hclk))
+ return PTR_ERR(hclk);
+
+ clk_prepare_enable(hclk);
+
+ writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
+ base + GSBI_CTRL_REG);
+
+ /* make sure the gsbi control write is not reordered */
+ wmb();
+
+ clk_disable_unprepare(hclk);
+
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+ { .compatible = "qcom,gsbi-v1.0.0", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+ .driver = {
+ .name = "gsbi",
+ .owner = THIS_MODULE,
+ .of_match_table = gsbi_dt_match,
+ },
+ .probe = gsbi_probe,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 053b98eb46c8..778e376f197e 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -52,7 +52,6 @@ struct msm_port {
struct clk *clk;
struct clk *pclk;
unsigned int imr;
- void __iomem *gsbi_base;
int is_uartdm;
unsigned int old_snap_state;
};
@@ -599,9 +598,7 @@ static const char *msm_type(struct uart_port *port)
static void msm_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
struct resource *uart_resource;
- struct resource *gsbi_resource;
resource_size_t size;
uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -612,28 +609,12 @@ static void msm_release_port(struct uart_port *port)
release_mem_region(port->mapbase, size);
iounmap(port->membase);
port->membase = NULL;
-
- if (msm_port->gsbi_base) {
- writel_relaxed(GSBI_PROTOCOL_IDLE,
- msm_port->gsbi_base + GSBI_CONTROL);
-
- gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (unlikely(!gsbi_resource))
- return;
-
- size = resource_size(gsbi_resource);
- release_mem_region(gsbi_resource->start, size);
- iounmap(msm_port->gsbi_base);
- msm_port->gsbi_base = NULL;
- }
}
static int msm_request_port(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *uart_resource;
- struct resource *gsbi_resource;
resource_size_t size;
int ret;
@@ -652,30 +633,8 @@ static int msm_request_port(struct uart_port *port)
goto fail_release_port;
}
- gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- /* Is this a GSBI-based port? */
- if (gsbi_resource) {
- size = resource_size(gsbi_resource);
-
- if (!request_mem_region(gsbi_resource->start, size,
- "msm_serial")) {
- ret = -EBUSY;
- goto fail_release_port_membase;
- }
-
- msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
- if (!msm_port->gsbi_base) {
- ret = -EBUSY;
- goto fail_release_gsbi;
- }
- }
-
return 0;
-fail_release_gsbi:
- release_mem_region(gsbi_resource->start, size);
-fail_release_port_membase:
- iounmap(port->membase);
fail_release_port:
release_mem_region(port->mapbase, size);
return ret;
@@ -683,7 +642,6 @@ fail_release_port:
static void msm_config_port(struct uart_port *port, int flags)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
int ret;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
@@ -691,9 +649,6 @@ static void msm_config_port(struct uart_port *port, int flags)
if (ret)
return;
}
- if (msm_port->gsbi_base)
- writel_relaxed(GSBI_PROTOCOL_UART,
- msm_port->gsbi_base + GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -1110,6 +1065,7 @@ static struct of_device_id msm_match_table[] = {
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
+ .probe = msm_serial_probe,
.driver = {
.name = "msm_serial",
.owner = THIS_MODULE,
@@ -1125,7 +1081,7 @@ static int __init msm_serial_init(void)
if (unlikely(ret))
return ret;
- ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ ret = platform_driver_register(&msm_platform_driver);
if (unlikely(ret))
uart_unregister_driver(&msm_uart_driver);
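
Switching from platform_driver_probe() to a .probe member plus platform_driver_register() is what lets the driver participate in deferred probing: platform_driver_probe() binds exactly once at init and never retries, while a registered .probe can return -EPROBE_DEFER and be called again. The contrast, as a sketch of the usual driver-core semantics:

/* Before: binds exactly once at init time; no later retries. */
ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);

/* After: normal registration; probe may return -EPROBE_DEFER and the
 * driver core retries when dependencies (clocks, parent devices) appear. */
ret = platform_driver_register(&msm_platform_driver);
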
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 1e9b68b6f9eb..d98d45efdf86 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -109,11 +109,6 @@
#define UART_ISR 0x0014
#define UART_ISR_TX_READY (1 << 7)
-#define GSBI_CONTROL 0x0
-#define GSBI_PROTOCOL_CODE 0x30
-#define GSBI_PROTOCOL_UART 0x40
-#define GSBI_PROTOCOL_IDLE 0x0
-
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
#define UARTDM_RXFS_BUF_MASK 0x7
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a673e5b6a2e0..e371f5af11f5 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
if (mem->addr & ~PAGE_MASK)
return -ENODEV;
- if (vma->vm_end - vma->vm_start > mem->size)
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size))
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
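
mmap works in whole pages, so a UIO region whose size is not page-aligned was previously unmappable: the smallest covering VMA spans PAGE_ALIGN(mem->size) bytes, which the old test rejected. A worked example assuming 4 KiB pages (a sketch, not driver code):

size_t size = 6000;			/* region size, not page aligned */
size_t vma_len = 8192;			/* smallest mmap covering it */
bool old_ok = !(vma_len > size);		/* false: 8192 > 6000, rejected */
bool new_ok = !(vma_len > PAGE_ALIGN(size));	/* true: 8192 > 8192 is false */
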
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 1270f3b26139..8d0bba469566 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -204,7 +204,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
- goto bad0;
+ goto bad1;
}
uioinfo->irq = ret;
}
@@ -275,6 +275,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
ret = uio_register_device(&pdev->dev, priv->uioinfo);
if (ret) {
dev_err(&pdev->dev, "unable to register uio device\n");
+ pm_runtime_disable(&pdev->dev);
goto bad1;
}
@@ -282,7 +283,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
return 0;
bad1:
kfree(priv);
- pm_runtime_disable(&pdev->dev);
bad0:
/* kfree uioinfo for OF */
if (pdev->dev.of_node)
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 6e146d723b37..69e49be8866b 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
return isp1301_otg_enable(isp);
return 0;
-#elif !defined(CONFIG_USB_GADGET_OMAP)
+#elif !IS_ENABLED(CONFIG_USB_OMAP)
// FIXME update its refcount
otg->host = host;
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index e21d1f58554c..4953b657635e 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -191,7 +191,7 @@ static struct fb_info fb_info = {
};
static void *screen_base; /* base address of screen */
-static void *real_screen_base; /* (only for Overscan) */
+static unsigned long phys_screen_base; /* (only for Overscan) */
static int screen_len;
@@ -213,7 +213,8 @@ static unsigned int external_yres;
*/
static unsigned int external_depth;
static int external_pmode;
-static void *external_addr;
+static void *external_screen_base;
+static unsigned long external_addr;
static unsigned long external_len;
static unsigned long external_vgaiobase;
static unsigned int external_bitspercol = 6;
@@ -592,7 +593,7 @@ static int tt_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par)
int mode;
strcpy(fix->id, "Atari Builtin");
- fix->smem_start = (unsigned long)real_screen_base;
+ fix->smem_start = phys_screen_base;
fix->smem_len = screen_len;
fix->type = FB_TYPE_INTERLEAVED_PLANES;
fix->type_aux = 2;
@@ -790,7 +791,7 @@ static void tt_get_par(struct atafb_par *par)
addr = ((shifter.bas_hi & 0xff) << 16) |
((shifter.bas_md & 0xff) << 8) |
((shifter.bas_lo & 0xff));
- par->screen_base = phys_to_virt(addr);
+ par->screen_base = atari_stram_to_virt(addr);
}
static void tt_set_par(struct atafb_par *par)
@@ -888,7 +889,7 @@ static int falcon_encode_fix(struct fb_fix_screeninfo *fix,
struct atafb_par *par)
{
strcpy(fix->id, "Atari Builtin");
- fix->smem_start = (unsigned long)real_screen_base;
+ fix->smem_start = phys_screen_base;
fix->smem_len = screen_len;
fix->type = FB_TYPE_INTERLEAVED_PLANES;
fix->type_aux = 2;
@@ -1584,7 +1585,7 @@ static void falcon_get_par(struct atafb_par *par)
addr = (shifter.bas_hi & 0xff) << 16 |
(shifter.bas_md & 0xff) << 8 |
(shifter.bas_lo & 0xff);
- par->screen_base = phys_to_virt(addr);
+ par->screen_base = atari_stram_to_virt(addr);
/* derived parameters */
hw->ste_mode = (hw->f_shift & 0x510) == 0 && hw->st_shift == 0x100;
@@ -1814,7 +1815,7 @@ static int stste_encode_fix(struct fb_fix_screeninfo *fix,
int mode;
strcpy(fix->id, "Atari Builtin");
- fix->smem_start = (unsigned long)real_screen_base;
+ fix->smem_start = phys_screen_base;
fix->smem_len = screen_len;
fix->type = FB_TYPE_INTERLEAVED_PLANES;
fix->type_aux = 2;
@@ -1980,7 +1981,7 @@ static void stste_get_par(struct atafb_par *par)
((shifter.bas_md & 0xff) << 8);
if (ATARIHW_PRESENT(EXTD_SHIFTER))
addr |= (shifter.bas_lo & 0xff);
- par->screen_base = phys_to_virt(addr);
+ par->screen_base = atari_stram_to_virt(addr);
}
static void stste_set_par(struct atafb_par *par)
@@ -2039,7 +2040,7 @@ static int stste_detect(void)
static void stste_set_screen_base(void *s_base)
{
unsigned long addr;
- addr = virt_to_phys(s_base);
+ addr = atari_stram_to_phys(s_base);
/* Setup Screen Memory */
shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
@@ -2113,7 +2114,7 @@ static void st_ovsc_switch(void)
static int ext_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par)
{
strcpy(fix->id, "Unknown Extern");
- fix->smem_start = (unsigned long)external_addr;
+ fix->smem_start = external_addr;
fix->smem_len = PAGE_ALIGN(external_len);
if (external_depth == 1) {
fix->type = FB_TYPE_PACKED_PIXELS;
@@ -2213,7 +2214,7 @@ static int ext_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par)
static void ext_get_par(struct atafb_par *par)
{
- par->screen_base = external_addr;
+ par->screen_base = external_screen_base;
}
static void ext_set_par(struct atafb_par *par)
@@ -2286,7 +2287,7 @@ static void set_screen_base(void *s_base)
{
unsigned long addr;
- addr = virt_to_phys(s_base);
+ addr = atari_stram_to_phys(s_base);
/* Setup Screen Memory */
shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
@@ -2433,7 +2434,9 @@ static void atafb_set_disp(struct fb_info *info)
atafb_get_var(&info->var, info);
atafb_get_fix(&info->fix, info);
- info->screen_base = (void *)info->fix.smem_start;
+ /* Note: smem_start derives from phys_screen_base, not screen_base! */
+ info->screen_base = (external_addr ? external_screen_base :
+ atari_stram_to_virt(info->fix.smem_start));
}
static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
@@ -2904,7 +2907,7 @@ static void __init atafb_setup_ext(char *spec)
external_yres = yres;
external_depth = depth;
external_pmode = planes;
- external_addr = (void *)addr;
+ external_addr = addr;
external_len = len;
if (external_card_type == IS_MV300) {
@@ -3166,30 +3169,30 @@ int __init atafb_init(void)
memset(screen_base, 0, mem_req);
pad = -(unsigned long)screen_base & (PAGE_SIZE - 1);
screen_base += pad;
- real_screen_base = screen_base + ovsc_offset;
+ phys_screen_base = atari_stram_to_phys(screen_base + ovsc_offset);
screen_len = (mem_req - pad - ovsc_offset) & PAGE_MASK;
st_ovsc_switch();
if (CPU_IS_040_OR_060) {
/* On a '040+, the cache mode of video RAM must be set to
* write-through also for internal video hardware! */
- cache_push(virt_to_phys(screen_base), screen_len);
+ cache_push(atari_stram_to_phys(screen_base), screen_len);
kernel_set_cachemode(screen_base, screen_len,
IOMAP_WRITETHROUGH);
}
- printk("atafb: screen_base %p real_screen_base %p screen_len %d\n",
- screen_base, real_screen_base, screen_len);
+ printk("atafb: screen_base %p phys_screen_base %lx screen_len %d\n",
+ screen_base, phys_screen_base, screen_len);
#ifdef ATAFB_EXT
} else {
/* Map the video memory (physical address given) to somewhere
* in the kernel address space.
*/
- external_addr = ioremap_writethrough((unsigned long)external_addr,
+ external_screen_base = ioremap_writethrough(external_addr,
external_len);
if (external_vgaiobase)
external_vgaiobase =
(unsigned long)ioremap(external_vgaiobase, 0x10000);
- screen_base =
- real_screen_base = external_addr;
+ screen_base = external_screen_base;
+ phys_screen_base = external_addr;
screen_len = external_len & PAGE_MASK;
memset (screen_base, 0, external_len);
}
@@ -3235,8 +3238,8 @@ int __init atafb_init(void)
if (register_framebuffer(&fb_info) < 0) {
#ifdef ATAFB_EXT
if (external_addr) {
- iounmap(external_addr);
- external_addr = NULL;
+ iounmap(external_screen_base);
+ external_addr = 0;
}
if (external_vgaiobase) {
iounmap((void*)external_vgaiobase);
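
[Annotation] The atafb changes above split the old dual-use pointers into an explicit virtual/physical pair: screen_base (and external_screen_base) stay kernel-virtual for drawing, while phys_screen_base/external_addr hold the physical addresses exported through fix->smem_start for mmap(). The atari_stram_to_phys()/atari_stram_to_virt() helpers replace virt_to_phys()/phys_to_virt() because ST-RAM need not be identity-mapped. A hedged sketch of the resulting invariant (atafb_pair_addresses is illustrative, not driver code; the helper declarations are assumed to come from asm/atari_stram.h):

    #include <linux/fb.h>
    #include <asm/atari_stram.h>

    /* Illustrative only: keep the two views of the frame buffer paired.
     * fix->smem_start must be a physical address (for user mmap());
     * info->screen_base must be the kernel virtual address (for drawing).
     */
    static void atafb_pair_addresses(struct fb_info *info, void *virt)
    {
            info->screen_base    = virt;
            info->fix.smem_start = atari_stram_to_phys(virt);
    }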
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index ff52618cafbe..5d7341520544 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1078,6 +1078,8 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
* w1_process_callbacks() - execute each dev->async_list callback entry
* @dev: w1_master device
*
+ * The w1 master list_mutex must be held.
+ *
* Return: 1 if there were commands to execute, 0 otherwise
*/
int w1_process_callbacks(struct w1_master *dev)
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 734dab7fc687..56a49ba41d83 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -203,7 +203,6 @@ enum w1_master_flags {
* @search_id: allows continuing a search
* @refcnt: reference count
* @priv: private data storage
- * @priv_size: size allocated
* @enable_pullup: allows a strong pullup
* @pullup_duration: time for the next strong pullup
* @flags: one of w1_master_flags
@@ -214,7 +213,6 @@ enum w1_master_flags {
* @dev: sysfs device
* @bus_master: io operations available
* @seq: sequence number used for netlink broadcasts
- * @portid: destination for the current netlink command
*/
struct w1_master
{
@@ -241,7 +239,6 @@ struct w1_master
atomic_t refcnt;
void *priv;
- int priv_size;
/** 5V strong pullup enabled flag, 1 enabled, zero disabled. */
int enable_pullup;
@@ -260,11 +257,6 @@ struct w1_master
struct w1_bus_master *bus_master;
u32 seq;
- /* port id to send netlink responses to. The value is temporarily
- * stored here while processing a message, set after locking the
- * mutex, zero before unlocking the mutex.
- */
- u32 portid;
};
/**
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 9b084db739c7..728039d2efe1 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -219,9 +219,13 @@ void __w1_remove_master_device(struct w1_master *dev)
if (msleep_interruptible(1000))
flush_signals(current);
+ mutex_lock(&dev->list_mutex);
w1_process_callbacks(dev);
+ mutex_unlock(&dev->list_mutex);
}
+ mutex_lock(&dev->list_mutex);
w1_process_callbacks(dev);
+ mutex_unlock(&dev->list_mutex);
memset(&msg, 0, sizeof(msg));
msg.id.mst.id = dev->id;
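
[Annotation] Both w1_process_callbacks() call sites above now take dev->list_mutex, matching the contract documented in w1.c: the function walks dev->async_list, so the list mutex must be held. A minimal sketch of the pattern (w1_drain_callbacks is an illustrative wrapper, not part of the patch):

    #include "w1.h"

    static void w1_drain_callbacks(struct w1_master *dev)
    {
            mutex_lock(&dev->list_mutex);
            w1_process_callbacks(dev);      /* requires list_mutex held */
            mutex_unlock(&dev->list_mutex);
    }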
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index a02704a59321..351a2978ba72 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -29,51 +29,247 @@
#include "w1_netlink.h"
#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
-void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/* Bundle together everything required to process a request in one memory
+ * allocation.
+ */
+struct w1_cb_block {
+ atomic_t refcnt;
+ u32 portid; /* Sending process port ID */
+ /* maximum value for first_cn->len */
+ u16 maxlen;
+ /* pointers to building up the reply message */
+ struct cn_msg *first_cn; /* fixed once the structure is populated */
+ struct cn_msg *cn; /* advances as cn_msg is appended */
+ struct w1_netlink_msg *msg; /* advances as w1_netlink_msg is appended */
+ struct w1_netlink_cmd *cmd; /* advances as cmds are appended */
+ struct w1_netlink_msg *cur_msg; /* current message being processed */
+ /* copy of the original request follows */
+ struct cn_msg request_cn;
+ /* followed by variable length:
+ * cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
+ * one or more struct w1_cb_node
+ * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
+ */
+};
+struct w1_cb_node {
+ struct w1_async_cmd async;
+ /* pointers within w1_cb_block and cn data */
+ struct w1_cb_block *block;
+ struct w1_netlink_msg *msg;
+ struct w1_slave *sl;
+ struct w1_master *dev;
+};
+
+/**
+ * w1_reply_len() - calculate current reply length, compare to maxlen
+ * @block: block to calculate the reply length of
+ *
+ * Calculates the current message length including possible multiple
+ * cn_msg and data, excluding the first sizeof(struct cn_msg). Directly
+ * comparable to maxlen and usable to send the message.
+ */
+static u16 w1_reply_len(struct w1_cb_block *block)
+{
+ if (!block->cn)
+ return 0;
+ return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
+}
+
+static void w1_unref_block(struct w1_cb_block *block)
+{
+ if (atomic_sub_return(1, &block->refcnt) == 0) {
+ u16 len = w1_reply_len(block);
+ if (len) {
+ cn_netlink_send_mult(block->first_cn, len,
+ block->portid, 0, GFP_KERNEL);
+ }
+ kfree(block);
+ }
+}
+
+/**
+ * w1_reply_make_space() - send message if needed to make space
+ * @block: block to make space on
+ * @space: how many bytes requested
+ *
+ * Verify there is enough room left for the caller to add "space" bytes to the
+ * message, if there isn't send the message and reset.
+ */
+static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
+{
+ u16 len = w1_reply_len(block);
+ if (len + space >= block->maxlen) {
+ cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL);
+ block->first_cn->len = 0;
+ block->cn = NULL;
+ block->msg = NULL;
+ block->cmd = NULL;
+ }
+}
+
+/* Early send when replies aren't bundled. */
+static void w1_netlink_check_send(struct w1_cb_block *block)
+{
+ if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
+ w1_reply_make_space(block, block->maxlen);
+}
+
+/**
+ * w1_netlink_setup_msg() - prepare to write block->msg
+ * @block: block to operate on
+ * @ack: determines if cn can be reused
+ *
+ * block->cn will be setup with the correct ack, advancing if needed
+ * block->cn->len does not include space for block->msg
+ * block->msg advances but remains uninitialized
+ */
+static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
+{
+ if (block->cn && block->cn->ack == ack) {
+ block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
+ } else {
+ /* advance or set to data */
+ if (block->cn)
+ block->cn = (struct cn_msg *)(block->cn->data +
+ block->cn->len);
+ else
+ block->cn = block->first_cn;
+
+ memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
+ block->cn->len = 0;
+ block->cn->ack = ack;
+ block->msg = (struct w1_netlink_msg *)block->cn->data;
+ }
+}
+
+/* Append cmd to msg, including cmd->data, because any following
+ * data goes with the command and, in the case of a read, holds
+ * the results.
+ */
+static void w1_netlink_queue_cmd(struct w1_cb_block *block,
+ struct w1_netlink_cmd *cmd)
+{
+ u32 space;
+ w1_reply_make_space(block, sizeof(struct cn_msg) +
+ sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);
+
+ /* There's a status message sent after each command, so no point
+ * in trying to bundle this cmd after an existing one, because
+ * there won't be one. Allocate and copy over a new cn_msg.
+ */
+ w1_netlink_setup_msg(block, block->request_cn.seq + 1);
+ memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
+ block->cn->len += sizeof(*block->msg);
+ block->msg->len = 0;
+ block->cmd = (struct w1_netlink_cmd *)(block->msg->data);
+
+ space = sizeof(*cmd) + cmd->len;
+ if (block->cmd != cmd)
+ memcpy(block->cmd, cmd, space);
+ block->cn->len += space;
+ block->msg->len += space;
+}
+
+/* Append req_msg and req_cmd, no other commands and no data from req_cmd are
+ * copied.
+ */
+static void w1_netlink_queue_status(struct w1_cb_block *block,
+ struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
+ int error)
{
- char buf[sizeof(struct cn_msg) + sizeof(struct w1_netlink_msg)];
- struct cn_msg *m = (struct cn_msg *)buf;
- struct w1_netlink_msg *w = (struct w1_netlink_msg *)(m+1);
+ u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);
+ w1_reply_make_space(block, space);
+ w1_netlink_setup_msg(block, block->request_cn.ack);
+
+ memcpy(block->msg, req_msg, sizeof(*req_msg));
+ block->cn->len += sizeof(*req_msg);
+ block->msg->len = 0;
+ block->msg->status = (u8)-error;
+ if (req_cmd) {
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
+ memcpy(cmd, req_cmd, sizeof(*cmd));
+ block->cn->len += sizeof(*cmd);
+ block->msg->len += sizeof(*cmd);
+ cmd->len = 0;
+ }
+ w1_netlink_check_send(block);
+}
- memset(buf, 0, sizeof(buf));
+/**
+ * w1_netlink_send_error() - sends the error message now
+ * @cn: original cn_msg
+ * @msg: original w1_netlink_msg
+ * @portid: where to send it
+ * @error: error status
+ *
+ * Use when a block isn't available to queue the message to, and when
+ * cn and msg might not be contiguous in memory.
+ */
+static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
+ int portid, int error)
+{
+ struct {
+ struct cn_msg cn;
+ struct w1_netlink_msg msg;
+ } packet;
+ memcpy(&packet.cn, cn, sizeof(packet.cn));
+ memcpy(&packet.msg, msg, sizeof(packet.msg));
+ packet.cn.len = sizeof(packet.msg);
+ packet.msg.len = 0;
+ packet.msg.status = (u8)-error;
+ cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
+}
+
+/**
+ * w1_netlink_send() - sends w1 netlink notifications
+ * @dev: w1_master the event is associated with or for
+ * @msg: w1_netlink_msg message to be sent
+ *
+ * These are notifications generated by the kernel.
+ */
+void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
+{
+ struct {
+ struct cn_msg cn;
+ struct w1_netlink_msg msg;
+ } packet;
+ memset(&packet, 0, sizeof(packet));
- m->id.idx = CN_W1_IDX;
- m->id.val = CN_W1_VAL;
+ packet.cn.id.idx = CN_W1_IDX;
+ packet.cn.id.val = CN_W1_VAL;
- m->seq = dev->seq++;
- m->len = sizeof(struct w1_netlink_msg);
+ packet.cn.seq = dev->seq++;
+ packet.cn.len = sizeof(*msg);
- memcpy(w, msg, sizeof(struct w1_netlink_msg));
+ memcpy(&packet.msg, msg, sizeof(*msg));
+ packet.msg.len = 0;
- cn_netlink_send(m, dev->portid, 0, GFP_KERNEL);
+ cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
}
static void w1_send_slave(struct w1_master *dev, u64 rn)
{
- struct cn_msg *msg = dev->priv;
- struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
- struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
- int avail;
+ struct w1_cb_block *block = dev->priv;
+ struct w1_netlink_cmd *cache_cmd = block->cmd;
u64 *data;
- avail = dev->priv_size - cmd->len;
+ w1_reply_make_space(block, sizeof(*data));
- if (avail < 8) {
- msg->ack++;
- cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
-
- msg->len = sizeof(struct w1_netlink_msg) +
- sizeof(struct w1_netlink_cmd);
- hdr->len = sizeof(struct w1_netlink_cmd);
- cmd->len = 0;
+ /* Add cmd back if the packet was sent */
+ if (!block->cmd) {
+ cache_cmd->len = 0;
+ w1_netlink_queue_cmd(block, cache_cmd);
}
- data = (void *)(cmd + 1) + cmd->len;
+ data = (u64 *)(block->cmd->data + block->cmd->len);
*data = rn;
- cmd->len += 8;
- hdr->len += 8;
- msg->len += 8;
+ block->cn->len += sizeof(*data);
+ block->msg->len += sizeof(*data);
+ block->cmd->len += sizeof(*data);
}
static void w1_found_send_slave(struct w1_master *dev, u64 rn)
@@ -85,40 +281,15 @@ static void w1_found_send_slave(struct w1_master *dev, u64 rn)
}
/* Get the current slave list, or search (with or without alarm) */
-static int w1_get_slaves(struct w1_master *dev,
- struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
- struct w1_netlink_cmd *req_cmd)
+static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
{
- struct cn_msg *msg;
- struct w1_netlink_msg *hdr;
- struct w1_netlink_cmd *cmd;
struct w1_slave *sl;
- msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = req_msg->id;
- msg->seq = req_msg->seq;
- msg->ack = 0;
- msg->len = sizeof(struct w1_netlink_msg) +
- sizeof(struct w1_netlink_cmd);
-
- hdr = (struct w1_netlink_msg *)(msg + 1);
- cmd = (struct w1_netlink_cmd *)(hdr + 1);
-
- hdr->type = W1_MASTER_CMD;
- hdr->id = req_hdr->id;
- hdr->len = sizeof(struct w1_netlink_cmd);
-
- cmd->cmd = req_cmd->cmd;
- cmd->len = 0;
-
- dev->priv = msg;
- dev->priv_size = PAGE_SIZE - msg->len - sizeof(struct cn_msg);
+ req_cmd->len = 0;
+ w1_netlink_queue_cmd(dev->priv, req_cmd);
if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
- __u64 rn;
+ u64 rn;
mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
memcpy(&rn, &sl->reg_num, sizeof(rn));
@@ -126,73 +297,26 @@ static int w1_get_slaves(struct w1_master *dev,
}
mutex_unlock(&dev->list_mutex);
} else {
- w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ?
+ w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
}
- msg->ack = 0;
- cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
-
- dev->priv = NULL;
- dev->priv_size = 0;
-
- kfree(msg);
-
return 0;
}
-static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
- struct w1_netlink_cmd *cmd, u32 portid)
-{
- void *data;
- struct w1_netlink_msg *h;
- struct w1_netlink_cmd *c;
- struct cn_msg *cm;
- int err;
-
- data = kzalloc(sizeof(struct cn_msg) +
- sizeof(struct w1_netlink_msg) +
- sizeof(struct w1_netlink_cmd) +
- cmd->len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- cm = (struct cn_msg *)(data);
- h = (struct w1_netlink_msg *)(cm + 1);
- c = (struct w1_netlink_cmd *)(h + 1);
-
- memcpy(cm, msg, sizeof(struct cn_msg));
- memcpy(h, hdr, sizeof(struct w1_netlink_msg));
- memcpy(c, cmd, sizeof(struct w1_netlink_cmd));
-
- cm->ack = msg->seq+1;
- cm->len = sizeof(struct w1_netlink_msg) +
- sizeof(struct w1_netlink_cmd) + cmd->len;
-
- h->len = sizeof(struct w1_netlink_cmd) + cmd->len;
-
- memcpy(c->data, cmd->data, c->len);
-
- err = cn_netlink_send(cm, portid, 0, GFP_KERNEL);
-
- kfree(data);
-
- return err;
-}
-
-static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
- struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
+static int w1_process_command_io(struct w1_master *dev,
+ struct w1_netlink_cmd *cmd)
{
int err = 0;
switch (cmd->cmd) {
case W1_CMD_TOUCH:
w1_touch_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd, dev->portid);
+ w1_netlink_queue_cmd(dev->priv, cmd);
break;
case W1_CMD_READ:
w1_read_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd, dev->portid);
+ w1_netlink_queue_cmd(dev->priv, cmd);
break;
case W1_CMD_WRITE:
w1_write_block(dev, cmd->data, cmd->len);
@@ -206,14 +330,13 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
}
static int w1_process_command_addremove(struct w1_master *dev,
- struct cn_msg *msg, struct w1_netlink_msg *hdr,
struct w1_netlink_cmd *cmd)
{
struct w1_slave *sl;
int err = 0;
struct w1_reg_num *id;
- if (cmd->len != 8)
+ if (cmd->len != sizeof(*id))
return -EINVAL;
id = (struct w1_reg_num *)cmd->data;
@@ -241,7 +364,6 @@ static int w1_process_command_addremove(struct w1_master *dev,
}
static int w1_process_command_master(struct w1_master *dev,
- struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
struct w1_netlink_cmd *req_cmd)
{
int err = -EINVAL;
@@ -254,13 +376,13 @@ static int w1_process_command_master(struct w1_master *dev,
case W1_CMD_ALARM_SEARCH:
case W1_CMD_LIST_SLAVES:
mutex_unlock(&dev->bus_mutex);
- err = w1_get_slaves(dev, req_msg, req_hdr, req_cmd);
+ err = w1_get_slaves(dev, req_cmd);
mutex_lock(&dev->bus_mutex);
break;
case W1_CMD_READ:
case W1_CMD_WRITE:
case W1_CMD_TOUCH:
- err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd);
+ err = w1_process_command_io(dev, req_cmd);
break;
case W1_CMD_RESET:
err = w1_reset_bus(dev);
@@ -269,8 +391,7 @@ static int w1_process_command_master(struct w1_master *dev,
case W1_CMD_SLAVE_REMOVE:
mutex_unlock(&dev->bus_mutex);
mutex_lock(&dev->mutex);
- err = w1_process_command_addremove(dev, req_msg, req_hdr,
- req_cmd);
+ err = w1_process_command_addremove(dev, req_cmd);
mutex_unlock(&dev->mutex);
mutex_lock(&dev->bus_mutex);
break;
@@ -282,22 +403,21 @@ static int w1_process_command_master(struct w1_master *dev,
return err;
}
-static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
- struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
+static int w1_process_command_slave(struct w1_slave *sl,
+ struct w1_netlink_cmd *cmd)
{
dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
sl->reg_num.crc, cmd->cmd, cmd->len);
- return w1_process_command_io(sl->master, msg, hdr, cmd);
+ return w1_process_command_io(sl->master, cmd);
}
-static int w1_process_command_root(struct cn_msg *msg,
- struct w1_netlink_msg *mcmd, u32 portid)
+static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
{
- struct w1_master *m;
+ struct w1_master *dev;
struct cn_msg *cn;
- struct w1_netlink_msg *w;
+ struct w1_netlink_msg *msg;
u32 *id;
cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -307,32 +427,30 @@ static int w1_process_command_root(struct cn_msg *msg,
cn->id.idx = CN_W1_IDX;
cn->id.val = CN_W1_VAL;
- cn->seq = msg->seq;
- cn->ack = 1;
+ cn->seq = req_cn->seq;
+ cn->ack = req_cn->seq + 1;
cn->len = sizeof(struct w1_netlink_msg);
- w = (struct w1_netlink_msg *)(cn + 1);
+ msg = (struct w1_netlink_msg *)cn->data;
- w->type = W1_LIST_MASTERS;
- w->status = 0;
- w->len = 0;
- id = (u32 *)(w + 1);
+ msg->type = W1_LIST_MASTERS;
+ msg->status = 0;
+ msg->len = 0;
+ id = (u32 *)msg->data;
mutex_lock(&w1_mlock);
- list_for_each_entry(m, &w1_masters, w1_master_entry) {
+ list_for_each_entry(dev, &w1_masters, w1_master_entry) {
if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
cn_netlink_send(cn, portid, 0, GFP_KERNEL);
- cn->ack++;
cn->len = sizeof(struct w1_netlink_msg);
- w->len = 0;
- id = (u32 *)(w + 1);
+ msg->len = 0;
+ id = (u32 *)msg->data;
}
- *id = m->id;
- w->len += sizeof(*id);
+ *id = dev->id;
+ msg->len += sizeof(*id);
cn->len += sizeof(*id);
id++;
}
- cn->ack = 0;
cn_netlink_send(cn, portid, 0, GFP_KERNEL);
mutex_unlock(&w1_mlock);
@@ -340,100 +458,44 @@ static int w1_process_command_root(struct cn_msg *msg,
return 0;
}
-static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
- struct w1_netlink_cmd *rcmd, int portid, int error)
-{
- struct cn_msg *cmsg;
- struct w1_netlink_msg *msg;
- struct w1_netlink_cmd *cmd;
-
- cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL);
- if (!cmsg)
- return -ENOMEM;
-
- msg = (struct w1_netlink_msg *)(cmsg + 1);
- cmd = (struct w1_netlink_cmd *)(msg + 1);
-
- memcpy(cmsg, rcmsg, sizeof(*cmsg));
- cmsg->len = sizeof(*msg);
-
- memcpy(msg, rmsg, sizeof(*msg));
- msg->len = 0;
- msg->status = (short)-error;
-
- if (rcmd) {
- memcpy(cmd, rcmd, sizeof(*cmd));
- cmd->len = 0;
- msg->len += sizeof(*cmd);
- cmsg->len += sizeof(*cmd);
- }
-
- error = cn_netlink_send(cmsg, portid, 0, GFP_KERNEL);
- kfree(cmsg);
-
- return error;
-}
-
-/* Bundle together a reference count, the full message, and broken out
- * commands to be executed on each w1 master kthread in one memory allocation.
- */
-struct w1_cb_block {
- atomic_t refcnt;
- u32 portid; /* Sending process port ID */
- struct cn_msg msg;
- /* cn_msg data */
- /* one or more variable length struct w1_cb_node */
-};
-struct w1_cb_node {
- struct w1_async_cmd async;
- /* pointers within w1_cb_block and msg data */
- struct w1_cb_block *block;
- struct w1_netlink_msg *m;
- struct w1_slave *sl;
- struct w1_master *dev;
-};
-
static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
async);
- u16 mlen = node->m->len;
- u8 *cmd_data = node->m->data;
+ u16 mlen = node->msg->len;
+ u16 len;
int err = 0;
struct w1_slave *sl = node->sl;
- struct w1_netlink_cmd *cmd = NULL;
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;
mutex_lock(&dev->bus_mutex);
- dev->portid = node->block->portid;
+ dev->priv = node->block;
if (sl && w1_reset_select_slave(sl))
err = -ENODEV;
+ node->block->cur_msg = node->msg;
while (mlen && !err) {
- cmd = (struct w1_netlink_cmd *)cmd_data;
-
if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
err = -E2BIG;
break;
}
if (sl)
- err = w1_process_command_slave(sl, &node->block->msg,
- node->m, cmd);
+ err = w1_process_command_slave(sl, cmd);
else
- err = w1_process_command_master(dev, &node->block->msg,
- node->m, cmd);
+ err = w1_process_command_master(dev, cmd);
+ w1_netlink_check_send(node->block);
- w1_netlink_send_error(&node->block->msg, node->m, cmd,
- node->block->portid, err);
+ w1_netlink_queue_status(node->block, node->msg, cmd, err);
err = 0;
- cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
- mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
+ len = sizeof(*cmd) + cmd->len;
+ cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
+ mlen -= len;
}
if (!cmd || err)
- w1_netlink_send_error(&node->block->msg, node->m, cmd,
- node->block->portid, err);
+ w1_netlink_queue_status(node->block, node->msg, cmd, err);
/* ref taken in w1_search_slave or w1_search_master_id when building
* the block
@@ -442,99 +504,186 @@ static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
w1_unref_slave(sl);
else
atomic_dec(&dev->refcnt);
- dev->portid = 0;
+ dev->priv = NULL;
mutex_unlock(&dev->bus_mutex);
mutex_lock(&dev->list_mutex);
list_del(&async_cmd->async_entry);
mutex_unlock(&dev->list_mutex);
- if (atomic_sub_return(1, &node->block->refcnt) == 0)
- kfree(node->block);
+ w1_unref_block(node->block);
+}
+
+static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
+ u16 *slave_len)
+{
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
+ u16 mlen = msg->len;
+ u16 len;
+ int slave_list = 0;
+ while (mlen) {
+ if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
+ break;
+
+ switch (cmd->cmd) {
+ case W1_CMD_SEARCH:
+ case W1_CMD_ALARM_SEARCH:
+ case W1_CMD_LIST_SLAVES:
+ ++slave_list;
+ }
+ ++*cmd_count;
+ len = sizeof(*cmd) + cmd->len;
+ cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
+ mlen -= len;
+ }
+
+ if (slave_list) {
+ struct w1_master *dev = w1_search_master_id(msg->id.mst.id);
+ if (dev) {
+ /* Bytes, and likely an overestimate; if it isn't,
+ * the results can still be split between packets.
+ */
+ *slave_len += sizeof(struct w1_reg_num) * slave_list *
+ (dev->slave_count + dev->max_slave_count);
+ /* search incremented it */
+ atomic_dec(&dev->refcnt);
+ }
+ }
}
-static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
{
- struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
+ struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
struct w1_slave *sl;
struct w1_master *dev;
u16 msg_len;
+ u16 slave_len = 0;
int err = 0;
struct w1_cb_block *block = NULL;
struct w1_cb_node *node = NULL;
int node_count = 0;
+ int cmd_count = 0;
+
+ /* If any unknown flag is set let the application know, that way
+ * applications can detect the absence of features in kernels that
+ * don't know about them. http://lwn.net/Articles/587527/
+ */
+ if (cn->flags & ~(W1_CN_BUNDLE)) {
+ w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
+ return;
+ }
/* Count the number of master or slave commands there are to allocate
* space for one cb_node each.
*/
- msg_len = msg->len;
+ msg_len = cn->len;
while (msg_len && !err) {
- if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
+ if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
- if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD)
+ /* count messages for nodes and allocate any additional space
+ * required for slave lists
+ */
+ if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
++node_count;
+ w1_list_count_cmds(msg, &cmd_count, &slave_len);
+ }
- msg_len -= sizeof(struct w1_netlink_msg) + m->len;
- m = (struct w1_netlink_msg *)(((u8 *)m) +
- sizeof(struct w1_netlink_msg) + m->len);
+ msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
+ msg = (struct w1_netlink_msg *)(((u8 *)msg) +
+ sizeof(struct w1_netlink_msg) + msg->len);
}
- m = (struct w1_netlink_msg *)(msg + 1);
+ msg = (struct w1_netlink_msg *)(cn + 1);
if (node_count) {
- /* msg->len doesn't include itself */
- long size = sizeof(struct w1_cb_block) + msg->len +
- node_count*sizeof(struct w1_cb_node);
- block = kmalloc(size, GFP_KERNEL);
+ int size;
+ u16 reply_size = sizeof(*cn) + cn->len + slave_len;
+ if (cn->flags & W1_CN_BUNDLE) {
+ /* bundling duplicates some of the messages */
+ reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
+ sizeof(struct w1_netlink_msg) +
+ sizeof(struct w1_netlink_cmd));
+ }
+ reply_size = MIN(CONNECTOR_MAX_MSG_SIZE, reply_size);
+
+ /* Allocate space for the block, a copy of the original message,
+ * one node per cmd to point into the original message, and
+ * space for replies, which is the original message size plus
+ * space for any list slave data and status messages.
+ * cn->len doesn't include itself, which is part of the block.
+ */
+ size = /* block + original message */
+ sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
+ /* space for nodes */
+ node_count * sizeof(struct w1_cb_node) +
+ /* replies */
+ sizeof(struct cn_msg) + reply_size;
+ block = kzalloc(size, GFP_KERNEL);
if (!block) {
- w1_netlink_send_error(msg, m, NULL, nsp->portid,
- -ENOMEM);
+ /* if the system is already out of memory,
+ * (A) will this work, and (B) would it be better
+ * to not try?
+ */
+ w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
return;
}
atomic_set(&block->refcnt, 1);
block->portid = nsp->portid;
- memcpy(&block->msg, msg, sizeof(*msg) + msg->len);
- node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
+ memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
+ node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
+
+ /* Sneaky: when not bundling, reply_size is the allocated space
+ * required for the reply, cn_msg isn't part of maxlen so
+ * it should be reply_size - sizeof(struct cn_msg), however
+ * when checking if there is enough space, w1_reply_make_space
+ * is called with the full message size including cn_msg,
+ * because it isn't known at that time if an additional cn_msg
+ * will need to be allocated. So an extra cn_msg is added
+ * above in "size".
+ */
+ block->maxlen = reply_size;
+ block->first_cn = (struct cn_msg *)(node + node_count);
+ memset(block->first_cn, 0, sizeof(*block->first_cn));
}
- msg_len = msg->len;
+ msg_len = cn->len;
while (msg_len && !err) {
dev = NULL;
sl = NULL;
- if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
+ if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
/* execute on this thread, no need to process later */
- if (m->type == W1_LIST_MASTERS) {
- err = w1_process_command_root(msg, m, nsp->portid);
+ if (msg->type == W1_LIST_MASTERS) {
+ err = w1_process_command_root(cn, nsp->portid);
goto out_cont;
}
/* All following message types require additional data,
* check here before references are taken.
*/
- if (!m->len) {
+ if (!msg->len) {
err = -EPROTO;
goto out_cont;
}
- /* both search calls take reference counts */
- if (m->type == W1_MASTER_CMD) {
- dev = w1_search_master_id(m->id.mst.id);
- } else if (m->type == W1_SLAVE_CMD) {
- sl = w1_search_slave((struct w1_reg_num *)m->id.id);
+ /* both search calls take references */
+ if (msg->type == W1_MASTER_CMD) {
+ dev = w1_search_master_id(msg->id.mst.id);
+ } else if (msg->type == W1_SLAVE_CMD) {
+ sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
if (sl)
dev = sl->master;
} else {
printk(KERN_NOTICE
- "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
- __func__, msg->id.idx, msg->id.val,
- m->type, m->len);
+ "%s: cn: %x.%x, wrong type: %u, len: %u.\n",
+ __func__, cn->id.idx, cn->id.val,
+ msg->type, msg->len);
err = -EPROTO;
goto out_cont;
}
@@ -549,8 +698,8 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
atomic_inc(&block->refcnt);
node->async.cb = w1_process_cb;
node->block = block;
- node->m = (struct w1_netlink_msg *)((u8 *)&block->msg +
- (size_t)((u8 *)m - (u8 *)msg));
+ node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
+ (size_t)((u8 *)msg - (u8 *)cn));
node->sl = sl;
node->dev = dev;
@@ -561,11 +710,15 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
++node;
out_cont:
+ /* Can't queue because that modifies block and another
+ * thread could be processing the messages by now and
+ * there isn't a lock; send directly.
+ */
if (err)
- w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
- msg_len -= sizeof(struct w1_netlink_msg) + m->len;
- m = (struct w1_netlink_msg *)(((u8 *)m) +
- sizeof(struct w1_netlink_msg) + m->len);
+ w1_netlink_send_error(cn, msg, nsp->portid, err);
+ msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
+ msg = (struct w1_netlink_msg *)(((u8 *)msg) +
+ sizeof(struct w1_netlink_msg) + msg->len);
/*
* Let's allow requests for nonexisting devices.
@@ -573,8 +726,8 @@ out_cont:
if (err == -ENODEV)
err = 0;
}
- if (block && atomic_sub_return(1, &block->refcnt) == 0)
- kfree(block);
+ if (block)
+ w1_unref_block(block);
}
int w1_init_netlink(void)
@@ -591,7 +744,7 @@ void w1_fini_netlink(void)
cn_del_callback(&w1_id);
}
#else
-void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
+void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
{
}
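
[Annotation] The reply buffer built by the w1_netlink.c rewrite above is a flat run of cn_msg packets: each payload of cn->len bytes starts at cn->data, and the next cn_msg follows immediately after, which is how w1_netlink_setup_msg() advances and how w1_reply_len() measures everything past the first cn_msg header. A hedged sketch of walking such a buffer (w1_walk_reply is illustrative, not part of the patch):

    #include <linux/connector.h>
    #include "w1_netlink.h"

    static void w1_walk_reply(struct cn_msg *first_cn, u16 reply_len)
    {
            /* reply_len follows the w1_reply_len() convention: the whole
             * run minus the first sizeof(struct cn_msg)
             */
            u8 *end = (u8 *)first_cn + sizeof(*first_cn) + reply_len;
            struct cn_msg *cn = first_cn;

            while ((u8 *)cn < end) {
                    /* first w1_netlink_msg of this packet; more may
                     * follow within cn->len
                     */
                    struct w1_netlink_msg *msg =
                            (struct w1_netlink_msg *)cn->data;

                    pr_debug("w1 reply: type %u, msg len %u\n",
                             msg->type, msg->len);
                    cn = (struct cn_msg *)(cn->data + cn->len); /* next */
            }
    }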
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 1e9504e67650..c99a9ce05e62 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -28,6 +28,17 @@
#include "w1.h"
/**
+ * enum w1_cn_msg_flags - bitfield flags for struct cn_msg.flags
+ *
+ * @W1_CN_BUNDLE: Request bundling replies into fewer messages. Be prepared
+ * to handle multiple struct cn_msg, struct w1_netlink_msg, and
+ * struct w1_netlink_cmd in one packet.
+ */
+enum w1_cn_msg_flags {
+ W1_CN_BUNDLE = 1,
+};
+
+/**
* enum w1_netlink_message_types - message type
*
* @W1_SLAVE_ADD: notification that a slave device was added
@@ -49,6 +60,19 @@ enum w1_netlink_message_types {
W1_LIST_MASTERS,
};
+/**
+ * struct w1_netlink_msg - holds w1 message type, id, and result
+ *
+ * @type: one of enum w1_netlink_message_types
+ * @status: kernel feedback for success 0 or errno failure value
+ * @len: length of data following w1_netlink_msg
+ * @id: union holding master bus id (msg.id) and slave device id (id[8]).
+ * @data: start address of any following data
+ *
+ * The base message structure for w1 messages over netlink.
+ * The netlink connector data sequence is: struct nlmsghdr, struct cn_msg,
+ * then one or more struct w1_netlink_msg (each with optional data).
+ */
struct w1_netlink_msg
{
__u8 type;
@@ -66,6 +90,7 @@ struct w1_netlink_msg
/**
* enum w1_commands - commands available for master or slave operations
+ *
* @W1_CMD_READ: read len bytes
* @W1_CMD_WRITE: write len bytes
* @W1_CMD_SEARCH: initiate a standard search, returns only the slave
@@ -93,6 +118,17 @@ enum w1_commands {
W1_CMD_MAX
};
+/**
+ * struct w1_netlink_cmd - holds the command and data
+ *
+ * @cmd: one of enum w1_commands
+ * @res: reserved
+ * @len: length of data following w1_netlink_cmd
+ * @data: start address of any following data
+ *
+ * One or more struct w1_netlink_cmd is placed starting at w1_netlink_msg.data
+ * each with optional data.
+ */
struct w1_netlink_cmd
{
__u8 cmd;
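
[Annotation] From userspace, W1_CN_BUNDLE travels in the flags field of the request's cn_msg. A minimal sketch of opting in (assumes a connector socket and a request already assembled; per the w1_cn_callback() change above, a kernel that predates the flag rejects the request with -EINVAL, which lets applications detect the missing feature):

    #include <linux/connector.h>
    #include "w1_netlink.h"

    static void w1_request_bundled_replies(struct cn_msg *cn)
    {
            /* ask the kernel to bundle replies; be ready to parse several
             * cn_msg/w1_netlink_msg/w1_netlink_cmd groups per packet
             */
            cn->flags |= W1_CN_BUNDLE;
    }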
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 32f9236c959f..c3667b202f2f 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -41,9 +41,6 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
struct suspend_info {
int cancelled;
- unsigned long arg; /* extra hypercall argument */
- void (*pre)(void);
- void (*post)(int cancelled);
};
static RAW_NOTIFIER_HEAD(xen_resume_notifier);
@@ -61,26 +58,6 @@ void xen_resume_notifier_unregister(struct notifier_block *nb)
EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister);
#ifdef CONFIG_HIBERNATE_CALLBACKS
-static void xen_hvm_post_suspend(int cancelled)
-{
- xen_arch_hvm_post_suspend(cancelled);
- gnttab_resume();
-}
-
-static void xen_pre_suspend(void)
-{
- xen_mm_pin_all();
- gnttab_suspend();
- xen_arch_pre_suspend();
-}
-
-static void xen_post_suspend(int cancelled)
-{
- xen_arch_post_suspend(cancelled);
- gnttab_resume();
- xen_mm_unpin_all();
-}
-
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -94,18 +71,20 @@ static int xen_suspend(void *data)
return err;
}
- if (si->pre)
- si->pre();
+ gnttab_suspend();
+ xen_arch_pre_suspend();
/*
* This hypercall returns 1 if suspend was cancelled
* or the domain was merely checkpointed, and 0 if it
* is resuming in a new domain.
*/
- si->cancelled = HYPERVISOR_suspend(si->arg);
+ si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
+ ? virt_to_mfn(xen_start_info)
+ : 0);
- if (si->post)
- si->post(si->cancelled);
+ xen_arch_post_suspend(si->cancelled);
+ gnttab_resume();
if (!si->cancelled) {
xen_irq_resume();
@@ -154,16 +133,6 @@ static void do_suspend(void)
si.cancelled = 1;
- if (xen_hvm_domain()) {
- si.arg = 0UL;
- si.pre = NULL;
- si.post = &xen_hvm_post_suspend;
- } else {
- si.arg = virt_to_mfn(xen_start_info);
- si.pre = &xen_pre_suspend;
- si.post = &xen_post_suspend;
- }
-
err = stop_machine(xen_suspend, &si, cpumask_of(0));
raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
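
[Annotation] With the suspend hooks folded into xen_suspend() itself, the raw notifier chain fired after stop_machine() remains the extension point for code that must run on resume. A hedged sketch of registering such a callback (foo_* names are illustrative; the register/unregister declarations are assumed to live in xen/xen-ops.h):

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <xen/xen-ops.h>

    static int foo_xen_resume(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            /* runs after xen_suspend() returns, whether or not the
             * suspend was cancelled
             */
            return NOTIFY_OK;
    }

    static struct notifier_block foo_xen_resume_nb = {
            .notifier_call = foo_xen_resume,
    };

    static int __init foo_init(void)
    {
            xen_resume_notifier_register(&foo_xen_resume_nb);
            return 0;
    }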
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 82358d14ecf1..59fc190f1e92 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -127,7 +127,7 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
pr_debug(" C%d: %s %d uS\n",
cx->type, cx->desc, (u32)cx->latency);
}
- } else if (ret != -EINVAL)
+ } else if ((ret != -EINVAL) && (ret != -ENOSYS))
/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
* table is referencing a non-existing CPU - which can happen
* with broken ACPI tables. */
@@ -259,7 +259,7 @@ static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
}
- } else if (ret != -EINVAL)
+ } else if ((ret != -EINVAL) && (ret != -ENOSYS))
/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
* table is referencing a non-existing CPU - which can happen
* with broken ACPI tables. */
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 62fcd485f0a7..d57a173685f3 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -242,6 +242,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
return found_dev;
}
+/*
+ * Called when:
+ * - XenBus state has been reconfigured (pci unplug). See xen_pcibk_remove_device
+ * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
+ * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
+ * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
+ *
+ * As such we have to be careful.
+ */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
struct pcistub_device *psdev, *found_psdev = NULL;
@@ -272,16 +281,16 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
* and want to inhibit the user from fiddling with 'reset'
*/
pci_reset_function(dev);
- pci_restore_state(psdev->dev);
+ pci_restore_state(dev);
/* This disables the device. */
- xen_pcibk_reset_device(found_psdev->dev);
+ xen_pcibk_reset_device(dev);
/* And cleanup up our emulated fields. */
- xen_pcibk_config_free_dyn_fields(found_psdev->dev);
- xen_pcibk_config_reset_dev(found_psdev->dev);
+ xen_pcibk_config_reset_dev(dev);
+ xen_pcibk_config_free_dyn_fields(dev);
- xen_unregister_device_domain_owner(found_psdev->dev);
+ xen_unregister_device_domain_owner(dev);
spin_lock_irqsave(&found_psdev->lock, flags);
found_psdev->pdev = NULL;
@@ -493,6 +502,8 @@ static int pcistub_seize(struct pci_dev *dev)
return err;
}
+/* Called on 'bind'. This means we must _NOT_ call pci_reset_function or
+ * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err = 0;
@@ -520,6 +531,8 @@ out:
return err;
}
+/* Called on 'unbind'. This means we must _NOT_ call pci_reset_function or
+ * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
struct pcistub_device *psdev, *found_psdev = NULL;
@@ -551,6 +564,8 @@ static void pcistub_remove(struct pci_dev *dev)
pr_warn("****** shutdown driver domain before binding device\n");
pr_warn("****** to other drivers or domains\n");
+ /* N.B. This ends up calling pcistub_put_pci_dev which ends up
+ * doing the FLR. */
xen_pcibk_release_pci_dev(found_psdev->pdev,
found_psdev->dev);
}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index a9ed867afaba..4a7e6e0a5f4c 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -93,6 +93,8 @@ static void free_pdev(struct xen_pcibk_device *pdev)
xen_pcibk_disconnect(pdev);
+ /* N.B. This calls pcistub_put_pci_dev which does the FLR on all
+ * of the PCIe devices. */
xen_pcibk_release_devices(pdev);
dev_set_drvdata(&pdev->xdev->dev, NULL);
@@ -286,6 +288,8 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
xen_unregister_device_domain_owner(dev);
+ /* N.B. This ends up calling pcistub_put_pci_dev which ends up
+ * doing the FLR. */
xen_pcibk_release_pci_dev(pdev, dev);
out: