Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/speakup/speakup_dectlk.c1
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_lpss.c33
-rw-r--r--drivers/acpi/acpi_platform.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c3
-rw-r--r--drivers/acpi/apei/bert.c10
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c19
-rw-r--r--drivers/acpi/apei/hest.c2
-rw-r--r--drivers/acpi/arm64/Kconfig10
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/agdi.c116
-rw-r--r--drivers/acpi/arm64/iort.c14
-rw-r--r--drivers/acpi/battery.c12
-rw-r--r--drivers/acpi/bus.c46
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/ec.c100
-rw-r--r--drivers/acpi/fan.h44
-rw-r--r--drivers/acpi/fan_attr.c137
-rw-r--r--drivers/acpi/fan_core.c (renamed from drivers/acpi/fan.c)204
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/osl.c19
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/processor_idle.c20
-rw-r--r--drivers/acpi/property.c29
-rw-r--r--drivers/acpi/scan.c15
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video_detect.c75
-rw-r--r--drivers/acpi/x86/s2idle.c12
-rw-r--r--drivers/acpi/x86/utils.c21
-rw-r--r--drivers/amba/bus.c73
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/acard-ahci.c2
-rw-r--r--drivers/ata/ahci.c113
-rw-r--r--drivers/ata/ahci.h4
-rw-r--r--drivers/ata/ahci_brcm.c2
-rw-r--r--drivers/ata/ahci_ceva.c2
-rw-r--r--drivers/ata/ahci_da850.c2
-rw-r--r--drivers/ata/ahci_dm816.c2
-rw-r--r--drivers/ata/ahci_imx.c2
-rw-r--r--drivers/ata/ahci_mtk.c2
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/ahci_octeon.c2
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_st.c2
-rw-r--r--drivers/ata/ahci_sunxi.c2
-rw-r--r--drivers/ata/ahci_xgene.c4
-rw-r--r--drivers/ata/ata_piix.c5
-rw-r--r--drivers/ata/libahci.c4
-rw-r--r--drivers/ata/libahci_platform.c3
-rw-r--r--drivers/ata/libata-acpi.c29
-rw-r--r--drivers/ata/libata-core.c47
-rw-r--r--drivers/ata/libata-eh.c49
-rw-r--r--drivers/ata/libata-sata.c10
-rw-r--r--drivers/ata/libata-scsi.c95
-rw-r--r--drivers/ata/libata-sff.c136
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_arasan_cf.c3
-rw-r--r--drivers/ata/pata_artop.c31
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_cs5520.c5
-rw-r--r--drivers/ata/pata_ep93xx.c4
-rw-r--r--drivers/ata/pata_ftide010.c6
-rw-r--r--drivers/ata/pata_hpt366.c49
-rw-r--r--drivers/ata/pata_hpt37x.c133
-rw-r--r--drivers/ata/pata_hpt3x2n.c38
-rw-r--r--drivers/ata/pata_imx.c15
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c2
-rw-r--r--drivers/ata/pata_macio.c24
-rw-r--r--drivers/ata/pata_mpc52xx.c7
-rw-r--r--drivers/ata/pata_ns87415.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c10
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_platform.c20
-rw-r--r--drivers/ata/pata_pxa.c10
-rw-r--r--drivers/ata/pata_samsung_cf.c12
-rw-r--r--drivers/ata/pata_triflex.c5
-rw-r--r--drivers/ata/sata_fsl.c30
-rw-r--r--drivers/ata/sata_gemini.c6
-rw-r--r--drivers/ata/sata_highbank.c7
-rw-r--r--drivers/ata/sata_inic162x.c10
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_rcar.c35
-rw-r--r--drivers/ata/sata_svw.c10
-rw-r--r--drivers/ata/sata_vsc.c10
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/auxdisplay/lcd2s.c24
-rw-r--r--drivers/base/arch_topology.c45
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/memory.c147
-rw-r--r--drivers/base/node.c48
-rw-r--r--drivers/base/power/domain.c42
-rw-r--r--drivers/base/power/main.c16
-rw-r--r--drivers/base/power/runtime.c5
-rw-r--r--drivers/base/power/wakeirq.c2
-rw-r--r--drivers/base/power/wakeup.c45
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regmap-irq.c26
-rw-r--r--drivers/base/regmap/regmap.c11
-rw-r--r--drivers/base/topology.c20
-rw-r--r--drivers/block/aoe/aoeblk.c1
-rw-r--r--drivers/block/aoe/aoecmd.c5
-rw-r--r--drivers/block/drbd/drbd_actlog.c5
-rw-r--r--drivers/block/drbd/drbd_bitmap.c7
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_receiver.c36
-rw-r--r--drivers/block/drbd/drbd_req.c8
-rw-r--r--drivers/block/drbd/drbd_worker.c10
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/loop.c115
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c7
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h1
-rw-r--r--drivers/block/null_blk/main.c54
-rw-r--r--drivers/block/pktcdvd.c21
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c28
-rw-r--r--drivers/block/rnbd/rnbd-clt.h1
-rw-r--r--drivers/block/rnbd/rnbd-proto.h4
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c61
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h18
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c1
-rw-r--r--drivers/block/rnbd/rnbd-srv.c46
-rw-r--r--drivers/block/rnbd/rnbd-srv.h1
-rw-r--r--drivers/block/sunvdc.c1
-rw-r--r--drivers/block/virtio_blk.c94
-rw-r--r--drivers/block/xen-blkback/blkback.c25
-rw-r--r--drivers/block/xen-blkback/xenbus.c1
-rw-r--r--drivers/block/xen-blkfront.c70
-rw-r--r--drivers/block/zram/zram_drv.c26
-rw-r--r--drivers/bus/mhi/pci_generic.c2
-rw-r--r--drivers/bus/moxtet.c4
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c148
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c2
-rw-r--r--drivers/char/hw_random/core.c162
-rw-r--r--drivers/char/hw_random/nomadik-rng.c4
-rw-r--r--drivers/char/random.c2858
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c5
-rw-r--r--drivers/char/tpm/st33zp24/spi.c9
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c3
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.h2
-rw-r--r--drivers/char/tpm/tpm-chip.c46
-rw-r--r--drivers/char/tpm/tpm-dev-common.c8
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-space.c73
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c3
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c2
-rw-r--r--drivers/char/tpm/xen-tpmfront.c8
-rw-r--r--drivers/char/virtio_console.c7
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/clk-lmk04832.c4
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c3
-rw-r--r--drivers/clk/qcom/dispcc-sc7180.c5
-rw-r--r--drivers/clk/qcom/dispcc-sc7280.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c106
-rw-r--r--drivers/clk/qcom/gdsc.c26
-rw-r--r--drivers/clk/qcom/gdsc.h8
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/arm_arch_timer.c13
-rw-r--r--drivers/clocksource/exynos_mct.c39
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c2
-rw-r--r--drivers/clocksource/timer-imx-tpm.c14
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c8
-rw-r--r--drivers/clocksource/timer-of.c6
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c7
-rw-r--r--drivers/connector/cn_proc.c2
-rw-r--r--drivers/counter/counter-core.c15
-rw-r--r--drivers/counter/counter-sysfs.c17
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h22
-rw-r--r--drivers/cpufreq/amd-pstate.c59
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c10
-rw-r--r--drivers/cpufreq/cpufreq_governor.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.h12
-rw-r--r--drivers/cpufreq/cpufreq_governor_attr_set.c5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c38
-rw-r--r--drivers/cpufreq/longhaul.c4
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c11
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c4
-rw-r--r--drivers/crypto/Kconfig10
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c3
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c2
-rw-r--r--drivers/crypto/atmel-aes.c1
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/atmel-tdes.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c8
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h2
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c83
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c16
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c7
-rw-r--r--drivers/crypto/ccree/cc_cipher.c2
-rw-r--r--drivers/crypto/gemini/sl3516-ce-cipher.c6
-rw-r--r--drivers/crypto/hisilicon/qm.c4
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c43
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h6
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c59
-rw-r--r--drivers/crypto/marvell/Kconfig1
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c5
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h19
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c25
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c27
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c59
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c17
-rw-r--r--drivers/crypto/mxs-dcp.c2
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c4
-rw-r--r--drivers/crypto/omap-aes.c2
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c23
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h24
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_drv.c7
-rw-r--r--drivers/crypto/qat/qat_common/Makefile1
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c37
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h4
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.h14
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pfvf.c42
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pm.c137
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pm.h44
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c42
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c4
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h1
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c7
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c9
-rw-r--r--drivers/crypto/qcom-rng.c17
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c1
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c2
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/crypto/vmx/Kconfig4
-rw-r--r--drivers/crypto/xilinx/Makefile1
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c264
-rw-r--r--drivers/dax/device.c3
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dma-buf/dma-heap.c2
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/ptdma/ptdma-dev.c17
-rw-r--r--drivers/dma/sh/rcar-dmac.c9
-rw-r--r--drivers/dma/sh/shdma-base.c4
-rw-r--r--drivers/dma/stm32-dmamux.c4
-rw-r--r--drivers/edac/altera_edac.c42
-rw-r--r--drivers/edac/amd64_edac.c109
-rw-r--r--drivers/edac/amd64_edac.h24
-rw-r--r--drivers/edac/edac_device_sysfs.c31
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_pci_sysfs.c26
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/arm_scmi/driver.c2
-rw-r--r--drivers/firmware/arm_sdei.c13
-rw-r--r--drivers/firmware/efi/apple-properties.c2
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c9
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c6
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c17
-rw-r--r--drivers/firmware/efi/mokvar-table.c2
-rw-r--r--drivers/firmware/efi/vars.c5
-rw-r--r--drivers/firmware/xilinx/zynqmp.c26
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-aggregator.c18
-rw-r--r--drivers/gpio/gpio-max3191x.c4
-rw-r--r--drivers/gpio/gpio-max7301.c4
-rw-r--r--drivers/gpio/gpio-mc33880.c4
-rw-r--r--drivers/gpio/gpio-mt7621.c1
-rw-r--r--drivers/gpio/gpio-omap.c7
-rw-r--r--drivers/gpio/gpio-pisosr.c4
-rw-r--r--drivers/gpio/gpio-rcar.c2
-rw-r--r--drivers/gpio/gpio-rockchip.c56
-rw-r--r--drivers/gpio/gpio-sifive.c2
-rw-r--r--drivers/gpio/gpio-sim.c23
-rw-r--r--drivers/gpio/gpio-tegra186.c16
-rw-r--r--drivers/gpio/gpio-tqmx86.c3
-rw-r--r--drivers/gpio/gpio-ts4900.c24
-rw-r--r--drivers/gpio/gpiolib-acpi.c6
-rw-r--r--drivers/gpio/gpiolib-cdev.c6
-rw-r--r--drivers/gpio/gpiolib-sysfs.c7
-rw-r--r--drivers/gpio/gpiolib.c20
-rw-r--r--drivers/gpio/gpiolib.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h11
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c64
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c9
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig2
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c12
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c5
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c14
-rw-r--r--drivers/gpu/drm/drm_cache.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c1
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c2
-rw-r--r--drivers/gpu/drm/drm_privacy_screen_x86.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c14
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c108
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c114
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c169
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h1
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h2
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c8
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c167
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_pll.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c7
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c21
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c3
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c4
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c3
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c3
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c3
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h8
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c7
-rw-r--r--drivers/gpu/drm/tegra/falcon.c2
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c4
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c4
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c4
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c4
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c4
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c4
-rw-r--r--drivers/gpu/drm/tiny/repaper.c4
-rw-r--r--drivers/gpu/drm/tiny/st7586.c4
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/host1x/syncpt.c35
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c76
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h4
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c4
-rw-r--r--drivers/hid/hid-apple.c16
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-logitech-dj.c1
-rw-r--r--drivers/hid/hid-nintendo.c4
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-thrustmaster.c8
-rw-r--r--drivers/hid/hid-vivaldi.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c28
-rw-r--r--drivers/hv/hv_balloon.c7
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/Kconfig49
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adcxx.c4
-rw-r--r--drivers/hwmon/adt7310.c95
-rw-r--r--drivers/hwmon/adt7410.c82
-rw-r--r--drivers/hwmon/adt7470.c3
-rw-r--r--drivers/hwmon/adt7x10.c479
-rw-r--r--drivers/hwmon/adt7x10.h10
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c379
-rw-r--r--drivers/hwmon/asus-ec-sensors.c716
-rw-r--r--drivers/hwmon/asus_wmi_ec_sensors.c3
-rw-r--r--drivers/hwmon/asus_wmi_sensors.c1
-rw-r--r--drivers/hwmon/axi-fan-control.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c84
-rw-r--r--drivers/hwmon/hwmon.c54
-rw-r--r--drivers/hwmon/lm70.c16
-rw-r--r--drivers/hwmon/lm83.c476
-rw-r--r--drivers/hwmon/lm90.c21
-rw-r--r--drivers/hwmon/max1111.c3
-rw-r--r--drivers/hwmon/max31722.c4
-rw-r--r--drivers/hwmon/max6639.c62
-rw-r--r--drivers/hwmon/mlxreg-fan.c84
-rw-r--r--drivers/hwmon/nct6775.c152
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/occ/common.c19
-rw-r--r--drivers/hwmon/occ/common.h2
-rw-r--r--drivers/hwmon/occ/sysfs.c46
-rw-r--r--drivers/hwmon/pmbus/Kconfig33
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/adm1275.c40
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwmon/pmbus/lm25066.c14
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c146
-rw-r--r--drivers/hwmon/pmbus/pmbus.h2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c137
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c32
-rw-r--r--drivers/hwmon/powr1220.c235
-rw-r--r--drivers/hwmon/sch5627.c71
-rw-r--r--drivers/hwmon/sch5636.c10
-rw-r--r--drivers/hwmon/sch56xx-common.c44
-rw-r--r--drivers/hwmon/scpi-hwmon.c6
-rw-r--r--drivers/hwmon/tc654.c104
-rw-r--r--drivers/hwmon/tmp464.c712
-rw-r--r--drivers/hwmon/vexpress-hwmon.c6
-rw-r--r--drivers/hwtracing/intel_th/msu.c4
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c11
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c16
-rw-r--r--drivers/idle/intel_idle.c111
-rw-r--r--drivers/iio/accel/bma400_spi.c4
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c5
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c4
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c4
-rw-r--r--drivers/iio/accel/fxls8962af-core.c12
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c2
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af.h3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/kxsd9-spi.c4
-rw-r--r--drivers/iio/accel/mma7455_spi.c4
-rw-r--r--drivers/iio/accel/mma9551.c5
-rw-r--r--drivers/iio/accel/mma9553.c5
-rw-r--r--drivers/iio/accel/sca3000.c4
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/ad7266.c4
-rw-r--r--drivers/iio/adc/ltc2496.c4
-rw-r--r--drivers/iio/adc/mcp320x.c4
-rw-r--r--drivers/iio/adc/mcp3911.c4
-rw-r--r--drivers/iio/adc/men_z188_adc.c9
-rw-r--r--drivers/iio/adc/ti-adc12138.c4
-rw-r--r--drivers/iio/adc/ti-ads7950.c4
-rw-r--r--drivers/iio/adc/ti-ads8688.c4
-rw-r--r--drivers/iio/adc/ti-tlc4541.c4
-rw-r--r--drivers/iio/adc/ti-tsc2046.c4
-rw-r--r--drivers/iio/addac/ad74413r.c17
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c4
-rw-r--r--drivers/iio/dac/ad5360.c4
-rw-r--r--drivers/iio/dac/ad5380.c4
-rw-r--r--drivers/iio/dac/ad5446.c4
-rw-r--r--drivers/iio/dac/ad5449.c4
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/dac/ad5592r.c4
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/dac/ad5686-spi.c4
-rw-r--r--drivers/iio/dac/ad5761.c4
-rw-r--r--drivers/iio/dac/ad5764.c4
-rw-r--r--drivers/iio/dac/ad5791.c4
-rw-r--r--drivers/iio/dac/ad8801.c4
-rw-r--r--drivers/iio/dac/ltc1660.c4
-rw-r--r--drivers/iio/dac/ltc2632.c4
-rw-r--r--drivers/iio/dac/mcp4922.c4
-rw-r--r--drivers/iio/dac/ti-dac082s085.c4
-rw-r--r--drivers/iio/dac/ti-dac7311.c3
-rw-r--r--drivers/iio/frequency/adf4350.c4
-rw-r--r--drivers/iio/frequency/admv1013.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c5
-rw-r--r--drivers/iio/gyro/bmg160_spi.c4
-rw-r--r--drivers/iio/gyro/fxas21002c_spi.c4
-rw-r--r--drivers/iio/health/afe4403.c4
-rw-r--r--drivers/iio/imu/adis16480.c7
-rw-r--r--drivers/iio/imu/kmx61.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c6
-rw-r--r--drivers/iio/industrialio-buffer.c14
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c5
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c4
-rw-r--r--drivers/iio/potentiometer/max5487.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c4
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c4
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cma.c62
-rw-r--r--drivers/infiniband/core/rw.c1
-rw-r--r--drivers/infiniband/core/ucma.c34
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c38
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw.h7
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c20
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c39
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/applespi.c4
-rw-r--r--drivers/input/misc/adxl34x-spi.c4
-rw-r--r--drivers/input/mouse/elan_i2c_core.c64
-rw-r--r--drivers/input/mouse/psmouse-smbus.c10
-rw-r--r--drivers/input/tablet/aiptek.c10
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c4
-rw-r--r--drivers/input/touchscreen/goodix.c34
-rw-r--r--drivers/input/touchscreen/tsc2005.c4
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c12
-rw-r--r--drivers/input/touchscreen/zinitix.c56
-rw-r--r--drivers/iommu/Kconfig6
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h1
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd/init.c12
-rw-r--r--drivers/iommu/amd/io_pgtable.c12
-rw-r--r--drivers/iommu/amd/iommu.c10
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c5
-rw-r--r--drivers/iommu/intel/Kconfig2
-rw-r--r--drivers/iommu/intel/iommu.c6
-rw-r--r--drivers/iommu/intel/irq_remapping.c13
-rw-r--r--drivers/iommu/intel/svm.c9
-rw-r--r--drivers/iommu/ioasid.c38
-rw-r--r--drivers/iommu/iommu-sva-lib.c39
-rw-r--r--drivers/iommu/iommu-sva-lib.h7
-rw-r--r--drivers/iommu/iommu.c33
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c552
-rw-r--r--drivers/irqchip/irq-ftintc010.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c126
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c104
-rw-r--r--drivers/irqchip/irq-imx-intmux.c8
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c34
-rw-r--r--drivers/irqchip/irq-meson-gpio.c106
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c28
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c461
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c18
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c3
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c3
-rw-r--r--drivers/irqchip/irq-sifive-plic.c39
-rw-r--r--drivers/irqchip/irq-stm32-exti.c50
-rw-r--r--drivers/irqchip/irq-ts4800.c25
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c46
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c30
-rw-r--r--drivers/irqchip/qcom-pdc.c137
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c6
-rw-r--r--drivers/leds/leds-cr0014114.c4
-rw-r--r--drivers/leds/leds-dac124s085.c4
-rw-r--r--drivers/leds/leds-el15203000.c4
-rw-r--r--drivers/leds/leds-spi-byte.c4
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/bcache/btree.c6
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c16
-rw-r--r--drivers/md/bcache/movinggc.c4
-rw-r--r--drivers/md/bcache/request.c26
-rw-r--r--drivers/md/bcache/super.c9
-rw-r--r--drivers/md/bcache/writeback.c21
-rw-r--r--drivers/md/dm-cache-target.c26
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c46
-rw-r--r--drivers/md/dm-integrity.c5
-rw-r--r--drivers/md/dm-io.c5
-rw-r--r--drivers/md/dm-log-writes.c39
-rw-r--r--drivers/md/dm-rq.c26
-rw-r--r--drivers/md/dm-snap.c21
-rw-r--r--drivers/md/dm-thin.c41
-rw-r--r--drivers/md/dm-writecache.c7
-rw-r--r--drivers/md/dm-zoned-metadata.c26
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c194
-rw-r--r--drivers/md/md-faulty.c4
-rw-r--r--drivers/md/md-multipath.c13
-rw-r--r--drivers/md/md.c39
-rw-r--r--drivers/md/raid1-10.c5
-rw-r--r--drivers/md/raid1.c58
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c47
-rw-r--r--drivers/md/raid10.h1
-rw-r--r--drivers/md/raid5-cache.c42
-rw-r--r--drivers/md/raid5-ppl.c29
-rw-r--r--drivers/md/raid5.c29
-rw-r--r--drivers/media/spi/cxd2880-spi.c4
-rw-r--r--drivers/media/spi/gs1662.c4
-rw-r--r--drivers/media/tuners/msi001.c3
-rw-r--r--drivers/memstick/core/ms_block.c64
-rw-r--r--drivers/memstick/core/ms_block.h1
-rw-r--r--drivers/memstick/core/mspro_block.c57
-rw-r--r--drivers/mfd/arizona-spi.c4
-rw-r--r--drivers/mfd/da9052-spi.c3
-rw-r--r--drivers/mfd/ezx-pcap.c8
-rw-r--r--drivers/mfd/lpc_ich.c59
-rw-r--r--drivers/mfd/madera-spi.c4
-rw-r--r--drivers/mfd/mc13xxx-spi.c3
-rw-r--r--drivers/mfd/rsmu_spi.c4
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/mfd/tps65912-spi.c4
-rw-r--r--drivers/misc/ad525x_dpot-spi.c3
-rw-r--r--drivers/misc/eeprom/at25.c4
-rw-r--r--drivers/misc/eeprom/ee1004.c3
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c4
-rw-r--r--drivers/misc/fastrpc.c9
-rw-r--r--drivers/misc/hi6421v600-irq.c6
-rw-r--r--drivers/misc/lattice-ecp3-config.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c4
-rw-r--r--drivers/mmc/core/block.c30
-rw-r--r--drivers/mmc/core/bus.c9
-rw-r--r--drivers/mmc/core/bus.h3
-rw-r--r--drivers/mmc/core/host.c24
-rw-r--r--drivers/mmc/core/mmc.c39
-rw-r--r--drivers/mmc/core/mmc_ops.c13
-rw-r--r--drivers/mmc/core/mmc_ops.h3
-rw-r--r--drivers/mmc/core/sd.c35
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/host/Kconfig13
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/davinci_mmc.c12
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c27
-rw-r--r--drivers/mmc/host/dw_mmc.c12
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/litex_mmc.c661
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c15
-rw-r--r--drivers/mmc/host/mmc_spi.c3
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c29
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c6
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c18
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c133
-rw-r--r--drivers/mmc/host/sdhci-tegra.c15
-rw-r--r--drivers/mmc/host/sdhci_am654.c28
-rw-r--r--drivers/mmc/host/sh_mmcif.c10
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c7
-rw-r--r--drivers/mtd/devices/mchp23k256.c4
-rw-r--r--drivers/mtd/devices/mchp48l640.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c4
-rw-r--r--drivers/mtd/devices/phram.c12
-rw-r--r--drivers/mtd/devices/sst25l.c4
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c3
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c7
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c14
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c1
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c36
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig36
-rw-r--r--drivers/mtd/spi-nor/controllers/Makefile3
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi.h21
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/bonding/bond_3ad.c33
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c1
-rw-r--r--drivers/net/can/flexcan/flexcan.h2
-rw-r--r--drivers/net/can/m_can/m_can.c6
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c4
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c6
-rw-r--r--drivers/net/can/spi/hi311x.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c4
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h8
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/b53/b53_spi.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.c7
-rw-r--r--drivers/net/dsa/lan9303-core.c13
-rw-r--r--drivers/net/dsa/lantiq_gswip.c14
-rw-r--r--drivers/net/dsa/microchip/ksz8795_spi.c15
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c16
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c26
-rw-r--r--drivers/net/dsa/mt7530.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c22
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c4
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c5
-rw-r--r--drivers/net/dsa/qca/ar9331.c3
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c6
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c6
-rw-r--r--drivers/net/ethernet/3com/typhoon.c6
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c10
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c6
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c5
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c47
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c7
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c243
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c1
-rw-r--r--drivers/net/ethernet/google/gve/gve.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c3
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c367
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c63
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c160
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c187
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c78
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c13
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h70
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c7
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c4
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c20
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c25
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c16
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c6
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c76
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c3
-rw-r--r--drivers/net/ethernet/sun/sunhme.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c2
-rw-r--r--drivers/net/ethernet/ti/cpts.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c35
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c17
-rw-r--r--drivers/net/ieee802154/ca8210.c11
-rw-r--r--drivers/net/ieee802154/cc2520.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c1
-rw-r--r--drivers/net/ieee802154/mcr20a.c8
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/ipa_power.c52
-rw-r--r--drivers/net/ipa/ipa_power.h7
-rw-r--r--drivers/net/ipa/ipa_uc.c5
-rw-r--r--drivers/net/macsec.c33
-rw-r--r--drivers/net/mctp/mctp-serial.c9
-rw-r--r--drivers/net/mdio/mdio-aspeed.c1
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c6
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c9
-rw-r--r--drivers/net/netdevsim/fib.c4
-rw-r--r--drivers/net/phy/at803x.c26
-rw-r--r--drivers/net/phy/broadcom.c1
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/marvell.c25
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/phy/meson-gxl.c31
-rw-r--r--drivers/net/phy/mscc/mscc_main.c3
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/phy/sfp-bus.c5
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/usb/ax88179_178a.c68
-rw-r--r--drivers/net/usb/cdc_ether.c12
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/ipheth.c6
-rw-r--r--drivers/net/usb/lan78xx.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/smsc95xx.c28
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c13
-rw-r--r--drivers/net/wan/slic_ds26522.c3
-rw-r--r--drivers/net/wireguard/device.c38
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c6
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c214
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c13
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/xen-netback/xenbus.c14
-rw-r--r--drivers/net/xen-netfront.c93
-rw-r--r--drivers/nfc/nfcmrvl/spi.c3
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nfc/st-nci/spi.c4
-rw-r--r--drivers/nfc/st95hf/core.c4
-rw-r--r--drivers/nfc/trf7970a.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c17
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h16
-rw-r--r--drivers/ntb/msi.c6
-rw-r--r--drivers/nvdimm/blk.c8
-rw-r--r--drivers/nvdimm/btt.c11
-rw-r--r--drivers/nvdimm/btt_devs.c1
-rw-r--r--drivers/nvdimm/bus.c1
-rw-r--r--drivers/nvdimm/nd_virtio.c6
-rw-r--r--drivers/nvdimm/pfn_devs.c1
-rw-r--r--drivers/nvdimm/pmem.h1
-rw-r--r--drivers/nvme/host/Kconfig8
-rw-r--r--drivers/nvme/host/Makefile2
-rw-r--r--drivers/nvme/host/constants.c185
-rw-r--r--drivers/nvme/host/core.c299
-rw-r--r--drivers/nvme/host/fabrics.c12
-rw-r--r--drivers/nvme/host/fabrics.h1
-rw-r--r--drivers/nvme/host/fc.c22
-rw-r--r--drivers/nvme/host/ioctl.c38
-rw-r--r--drivers/nvme/host/multipath.c34
-rw-r--r--drivers/nvme/host/nvme.h47
-rw-r--r--drivers/nvme/host/pci.c21
-rw-r--r--drivers/nvme/host/rdma.c118
-rw-r--r--drivers/nvme/host/tcp.c125
-rw-r--r--drivers/nvme/target/admin-cmd.c6
-rw-r--r--drivers/nvme/target/configfs.c66
-rw-r--r--drivers/nvme/target/core.c12
-rw-r--r--drivers/nvme/target/fc.c16
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c27
-rw-r--r--drivers/nvme/target/io-cmd-file.c17
-rw-r--r--drivers/nvme/target/loop.c6
-rw-r--r--drivers/nvme/target/nvmet.h4
-rw-r--r--drivers/nvme/target/passthru.c10
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c6
-rw-r--r--drivers/nvme/target/zns.c20
-rw-r--r--drivers/nvmem/core.c2
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/of_reserved_mem.c9
-rw-r--r--drivers/of/unittest.c16
-rw-r--r--drivers/parisc/ccio-dma.c3
-rw-r--r--drivers/parisc/sba_iommu.c3
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c85
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c31
-rw-r--r--drivers/pci/controller/pci-hyperv.c13
-rw-r--r--drivers/pci/controller/pci-mvebu.c3
-rw-r--r--drivers/pci/controller/pcie-apple.c2
-rw-r--r--drivers/pci/controller/pcie-mt7621.c11
-rw-r--r--drivers/pci/controller/vmd.c14
-rw-r--r--drivers/pci/msi/irqdomain.c4
-rw-r--r--drivers/pci/msi/legacy.c1
-rw-r--r--drivers/pci/msi/msi.c3
-rw-r--r--drivers/pci/pci-driver.c35
-rw-r--r--drivers/pci/pcie/portdrv_core.c47
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/perf/Kconfig16
-rw-r--r--drivers/perf/Makefile2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c584
-rw-r--r--drivers/perf/arm-cci.c2
-rw-r--r--drivers/perf/arm-ccn.c10
-rw-r--r--drivers/perf/arm-cmn.c23
-rw-r--r--drivers/perf/arm_pmu.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c2
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c758
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c2
-rw-r--r--drivers/perf/thunderx2_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c8
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c3
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c38
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c35
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c3
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c11
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c23
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c64
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c25
-rw-r--r--drivers/pinctrl/pinctrl-k210.c4
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c3
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c6
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c90
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c15
-rw-r--r--drivers/platform/chrome/cros_ec.c4
-rw-r--r--drivers/platform/chrome/cros_ec.h2
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c4
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c4
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c4
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/platform/surface/Kconfig1
-rw-r--r--drivers/platform/surface/surface3_power.c13
-rw-r--r--drivers/platform/x86/amd-pmc.c57
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/intel/crystal_cove_charger.c26
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c97
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c26
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c24
-rw-r--r--drivers/platform/x86/x86-android-tablets.c105
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c4
-rw-r--r--drivers/power/supply/bq256xx_charger.c3
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/powercap/Kconfig8
-rw-r--r--drivers/powercap/Makefile1
-rw-r--r--drivers/powercap/dtpm.c333
-rw-r--r--drivers/powercap/dtpm_cpu.c55
-rw-r--r--drivers/powercap/dtpm_devfreq.c203
-rw-r--r--drivers/powercap/dtpm_subsys.h22
-rw-r--r--drivers/ptp/ptp_ocp.c25
-rw-r--r--drivers/regulator/Kconfig20
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/core.c13
-rw-r--r--drivers/regulator/da9121-regulator.c16
-rw-r--r--drivers/regulator/max20086-regulator.c3
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c37
-rw-r--r--drivers/regulator/qcom_smd-regulator.c4
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c291
-rw-r--r--drivers/regulator/rt5190a-regulator.c513
-rw-r--r--drivers/regulator/sc2731-regulator.c2
-rw-r--r--drivers/regulator/ti-abb-regulator.c6
-rw-r--r--drivers/regulator/tps6286x-regulator.c159
-rw-r--r--drivers/regulator/vctrl-regulator.c5
-rw-r--r--drivers/regulator/virtual.c41
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig4
-rw-r--r--drivers/remoteproc/qcom_q6v5.c1
-rw-r--r--drivers/rpmsg/rpmsg_char.c22
-rw-r--r--drivers/rtc/rtc-ds1302.c3
-rw-r--r--drivers/rtc/rtc-ds1305.c4
-rw-r--r--drivers/rtc/rtc-ds1343.c4
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/block/scm_blk.h1
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c13
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/53c700.c1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c41
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c8
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c17
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/myrs.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c18
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c47
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h6
-rw-r--r--drivers/scsi/qedf/qedf_io.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c7
-rw-r--r--drivers/scsi/qedi/qedi_fw.c6
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_scan.c55
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c115
-rw-r--r--drivers/scsi/sd.h12
-rw-r--r--drivers/scsi/sr.c131
-rw-r--r--drivers/scsi/sr.h6
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c7
-rw-r--r--drivers/scsi/ufs/ufshcd.c11
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/ufshpb.c4
-rw-r--r--drivers/scsi/xen-scsifront.c3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c7
-rw-r--r--drivers/soc/fsl/guts.c14
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/qe_io.c2
-rw-r--r--drivers/soc/imx/gpcv2.c3
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h3
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c15
-rw-r--r--drivers/soc/rockchip/Kconfig8
-rw-r--r--drivers/soc/rockchip/Makefile1
-rw-r--r--drivers/soc/rockchip/dtpm.c65
-rw-r--r--drivers/soc/samsung/Kconfig2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c2
-rw-r--r--drivers/spi/Kconfig50
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-amd.c87
-rw-r--r--drivers/spi/spi-ath79.c34
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835aux.c21
-rw-r--r--drivers/spi/spi-bitbang-txrx.h66
-rw-r--r--drivers/spi/spi-cadence-xspi.c4
-rw-r--r--drivers/spi/spi-fsi.c10
-rw-r--r--drivers/spi/spi-geni-qcom.c7
-rw-r--r--drivers/spi/spi-gpio.c42
-rw-r--r--drivers/spi/spi-intel-pci.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi-pci.c)50
-rw-r--r--drivers/spi/spi-intel-platform.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi-platform.c)21
-rw-r--r--drivers/spi/spi-intel.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi.c)850
-rw-r--r--drivers/spi/spi-intel.h19
-rw-r--r--drivers/spi/spi-lantiq-ssc.c8
-rw-r--r--drivers/spi/spi-mem.c6
-rw-r--r--drivers/spi/spi-meson-spicc.c5
-rw-r--r--drivers/spi/spi-mpc512x-psc.c47
-rw-r--r--drivers/spi/spi-mt65xx.c136
-rw-r--r--drivers/spi/spi-mtk-nor.c71
-rw-r--r--drivers/spi/spi-npcm-fiu.c14
-rw-r--r--drivers/spi/spi-pic32.c9
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c320
-rw-r--r--drivers/spi/spi-pxa2xx.c68
-rw-r--r--drivers/spi/spi-qup.c5
-rw-r--r--drivers/spi/spi-rockchip-sfc.c4
-rw-r--r--drivers/spi/spi-rockchip.c135
-rw-r--r--drivers/spi/spi-s3c24xx.c47
-rw-r--r--drivers/spi/spi-s3c64xx.c80
-rw-r--r--drivers/spi/spi-slave-system-control.c3
-rw-r--r--drivers/spi/spi-slave-time.c3
-rw-r--r--drivers/spi/spi-st-ssc4.c31
-rw-r--r--drivers/spi/spi-stm32-qspi.c47
-rw-r--r--drivers/spi/spi-stm32.c9
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c584
-rw-r--r--drivers/spi/spi-tegra114.c4
-rw-r--r--drivers/spi/spi-tegra20-slink.c8
-rw-r--r--drivers/spi/spi-tegra210-quad.c341
-rw-r--r--drivers/spi/spi-tle62x0.c3
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/spi/spi-uniphier.c18
-rw-r--r--drivers/spi/spi-zynq-qspi.c3
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/spi/spi.c291
-rw-r--r--drivers/spi/spidev.c35
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c2
-rw-r--r--drivers/staging/fbtft/fbtft.h97
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c5
-rw-r--r--drivers/staging/greybus/gpio.c5
-rw-r--r--drivers/staging/pi433/pi433_if.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c22
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c16
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c20
-rw-r--r--drivers/staging/wfx/bus_spi.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_iblock.c12
-rw-r--r--drivers/target/target_core_pscsi.c1
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/tee/optee/ffa_abi.c94
-rw-r--r--drivers/tee/optee/notif.c2
-rw-r--r--drivers/tee/optee/optee_private.h5
-rw-r--r--drivers/tee/optee/smc_abi.c60
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c2
-rw-r--r--drivers/thermal/intel/Kconfig14
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c23
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c157
-rw-r--r--drivers/thermal/intel/intel_hfi.c569
-rw-r--r--drivers/thermal/intel/intel_hfi.h17
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c9
-rw-r--r--drivers/thermal/intel/therm_throt.c22
-rw-r--r--drivers/thermal/qcom/lmh.c62
-rw-r--r--drivers/thermal/qcom/tsens.c5
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c13
-rw-r--r--drivers/thermal/thermal_netlink.c58
-rw-r--r--drivers/thermal/thermal_netlink.h14
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c12
-rw-r--r--drivers/tty/n_gsm.c65
-rw-r--r--drivers/tty/n_tty.c10
-rw-r--r--drivers/tty/rpmsg_tty.c40
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c11
-rw-r--r--drivers/tty/serial/8250/8250_pci.c100
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c61
-rw-r--r--drivers/tty/serial/amba-pl011.c11
-rw-r--r--drivers/tty/serial/max3100.c5
-rw-r--r--drivers/tty/serial/max310x.c3
-rw-r--r--drivers/tty/serial/sc16is7xx.c7
-rw-r--r--drivers/tty/serial/serial_core.c34
-rw-r--r--drivers/tty/serial/stm32-usart.c14
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c3
-rw-r--r--drivers/usb/cdns3/drd.c6
-rw-r--r--drivers/usb/class/usbtmc.c13
-rw-r--r--drivers/usb/common/ulpi.c17
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hcd.c14
-rw-r--r--drivers/usb/core/port.c9
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/drd.c6
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c21
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c25
-rw-r--r--drivers/usb/dwc3/gadget.c15
-rw-r--r--drivers/usb/gadget/composite.c3
-rw-r--r--drivers/usb/gadget/function/f_fs.c56
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/function/rndis.c18
-rw-r--r--drivers/usb/gadget/function/rndis.h1
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c2
-rw-r--r--drivers/usb/gadget/udc/core.c3
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c4
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c6
-rw-r--r--drivers/usb/host/max3421-hcd.c3
-rw-r--r--drivers/usb/host/xen-hcd.c26
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci.c28
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/typec/port-mapper.c10
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c26
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/tipd/core.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c34
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c2
-rw-r--r--drivers/vhost/iotlb.c11
-rw-r--r--drivers/vhost/vdpa.c2
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--drivers/vhost/vsock.c24
-rw-r--r--drivers/video/backlight/ams369fg06.c3
-rw-r--r--drivers/video/backlight/corgi_lcd.c3
-rw-r--r--drivers/video/backlight/ili922x.c3
-rw-r--r--drivers/video/backlight/l4f00242t03.c3
-rw-r--r--drivers/video/backlight/lm3630a_bl.c1
-rw-r--r--drivers/video/backlight/lms501kf03.c3
-rw-r--r--drivers/video/backlight/ltv350qv.c3
-rw-r--r--drivers/video/backlight/qcom-wled.c1
-rw-r--r--drivers/video/backlight/tdo24m.c3
-rw-r--r--drivers/video/backlight/tosa_lcd.c4
-rw-r--r--drivers/video/backlight/vgg2432a4.c4
-rw-r--r--drivers/video/console/Kconfig20
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/asiliantfb.c2
-rw-r--r--drivers/video/fbdev/atafb.c35
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c11
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c10
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c4
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c2
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1100fb.h2
-rw-r--r--drivers/video/fbdev/au1200fb.c4
-rw-r--r--drivers/video/fbdev/cirrusfb.c16
-rw-r--r--drivers/video/fbdev/controlfb.c2
-rw-r--r--drivers/video/fbdev/core/bitblit.c16
-rw-r--r--drivers/video/fbdev/core/fb_defio.c9
-rw-r--r--drivers/video/fbdev/core/fbcon.c554
-rw-r--r--drivers/video/fbdev/core/fbcon.h72
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h9
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c37
-rw-r--r--drivers/video/fbdev/core/fbcvt.c53
-rw-r--r--drivers/video/fbdev/core/fbmem.c10
-rw-r--r--drivers/video/fbdev/core/tileblit.c16
-rw-r--r--drivers/video/fbdev/da8xx-fb.c7
-rw-r--r--drivers/video/fbdev/hyperv_fb.c16
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/kyro/STG4000InitDevice.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/nvidia/nv_i2c.c2
-rw-r--r--drivers/video/fbdev/ocfb.c2
-rw-r--r--drivers/video/fbdev/offb.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c16
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c4
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c13
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c1
-rw-r--r--drivers/video/fbdev/pxa168fb.c15
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c10
-rw-r--r--drivers/video/fbdev/s3c-fb.c20
-rw-r--r--drivers/video/fbdev/savage/savagefb.h1
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c7
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/skeletonfb.c12
-rw-r--r--drivers/video/fbdev/sm712fb.c46
-rw-r--r--drivers/video/fbdev/smscufx.c3
-rw-r--r--drivers/video/fbdev/ssd1307fb.c7
-rw-r--r--drivers/video/fbdev/stifb.c45
-rw-r--r--drivers/video/fbdev/udlfb.c8
-rw-r--r--drivers/video/fbdev/via/lcd.c2
-rw-r--r--drivers/video/fbdev/via/viafbdev.c10
-rw-r--r--drivers/video/fbdev/w100fb.c15
-rw-r--r--drivers/virt/Kconfig11
-rw-r--r--drivers/virt/Makefile1
-rw-r--r--drivers/virt/vmgenid.c100
-rw-r--r--drivers/virtio/Kconfig1
-rw-r--r--drivers/virtio/virtio.c56
-rw-r--r--drivers/virtio/virtio_mem.c9
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/xen/gntalloc.c25
-rw-r--r--drivers/xen/grant-table.c71
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/pvcalls-front.c8
-rw-r--r--drivers/xen/xenbus/xenbus_client.c24
1499 files changed, 26397 insertions, 12629 deletions
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 580ec796816b..78ca4987e619 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -44,6 +44,7 @@ static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
+ { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ba45541b1f1f..1e34f846508f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -11,6 +11,7 @@ menuconfig ACPI
depends on ARCH_SUPPORTS_ACPI
select PNP
select NLS
+ select CRC32
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
@@ -301,7 +302,7 @@ config ACPI_IPMI
help
This driver enables the ACPI to access the BMC controller. And it
uses the IPMI request/response message to communicate with BMC
- controller, which can be found on on the server.
+ controller, which can be found on the server.
To compile this driver as a module, choose M here:
the module will be called as acpi_ipmi.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bb757148e7ba..b5a8d3e00a52 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -81,6 +81,9 @@ obj-$(CONFIG_ACPI_AC) += ac.o
obj-$(CONFIG_ACPI_BUTTON) += button.o
obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
+fan-objs := fan_core.o
+fan-objs += fan_attr.o
+
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index bcae0f03572b..fbe0756259c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -21,6 +21,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/pxa2xx_ssp.h>
#include <linux/suspend.h>
#include <linux/delay.h>
@@ -82,7 +83,7 @@ struct lpss_device_desc {
const char *clk_con_id;
unsigned int prv_offset;
size_t prv_size_override;
- struct property_entry *properties;
+ const struct property_entry *properties;
void (*setup)(struct lpss_private_data *pdata);
bool resume_from_noirq;
};
@@ -219,10 +220,16 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
-static const struct lpss_device_desc lpt_dev_desc = {
+static const struct property_entry lpt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
+ { }
+};
+
+static const struct lpss_device_desc lpt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
| LPSS_SAVE_CTX,
.prv_offset = 0x800,
+ .properties = lpt_spi_properties,
};
static const struct lpss_device_desc lpt_i2c_dev_desc = {
@@ -282,9 +289,15 @@ static const struct lpss_device_desc bsw_uart_dev_desc = {
.properties = uart_properties,
};
+static const struct property_entry byt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
+ { }
+};
+
static const struct lpss_device_desc byt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
+ .properties = byt_spi_properties,
};
static const struct lpss_device_desc byt_sdio_dev_desc = {
@@ -305,11 +318,17 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
.resume_from_noirq = true,
};
+static const struct property_entry bsw_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
+ { }
+};
+
static const struct lpss_device_desc bsw_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
| LPSS_NO_D3_DELAY,
.prv_offset = 0x400,
.setup = lpss_deassert_reset,
+ .properties = bsw_spi_properties,
};
static const struct x86_cpu_id lpss_cpu_ids[] = {
@@ -329,8 +348,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
/* Lynxpoint LPSS devices */
- { "INT33C0", LPSS_ADDR(lpt_dev_desc) },
- { "INT33C1", LPSS_ADDR(lpt_dev_desc) },
+ { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
+ { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
@@ -356,8 +375,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
/* Broadwell LPSS devices */
- { "INT3430", LPSS_ADDR(lpt_dev_desc) },
- { "INT3431", LPSS_ADDR(lpt_dev_desc) },
+ { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
+ { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
@@ -366,7 +385,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3437", },
/* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_dev_desc) },
+ { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
{ }
};
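
The property entries added above let the SPI host driver discover which SSP variant sits behind a given LPSS ACPI ID. A minimal consumer-side sketch, assuming a driver wants to read that property (error handling simplified; the type values come from linux/pxa2xx_ssp.h):

    #include <linux/property.h>

    static int example_read_ssp_type(struct device *dev, u32 *type)
    {
            /* Returns 0 on success, or a negative error code if the
             * property is missing or not a u32. */
            return device_property_read_u32(dev, "intel,spi-pxa2xx-type", type);
    }
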
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 78d621290a35..de3cbf152dee 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -95,7 +95,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
* Name of the platform device will be the same as @adev's.
*/
struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
- struct property_entry *properties)
+ const struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 915c2433463d..e7c30ce06e18 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
+ }
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 19e50fcbf4d6..598fd19b65fa 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -29,6 +29,7 @@
#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt
+#define ACPI_BERT_PRINT_MAX_LEN 1024
static int bert_disable;
@@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region,
}
pr_info_once("Error records from previous boot:\n");
-
- cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ if (region_len < ACPI_BERT_PRINT_MAX_LEN)
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ else
+ pr_info_once("Max print length exceeded, table data is available at:\n"
+ "/sys/firmware/acpi/tables/data/BERT");
/*
* Because the boot error source is "one-time polled" type,
@@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str)
{
bert_disable = 1;
- return 0;
+ return 1;
}
__setup("bert_disable", setup_bert_disable);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 242f3c2d5533..698d67cee052 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear);
static int __init setup_erst_disable(char *str)
{
erst_disable = 1;
- return 0;
+ return 1;
}
__setup("erst_disable", setup_erst_disable);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0c5c9acc6254..d91ad378c00d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = {
.remove = ghes_remove,
};
-static int __init ghes_init(void)
+void __init acpi_ghes_init(void)
{
int rc;
+ sdei_init();
+
if (acpi_disabled)
- return -ENODEV;
+ return;
switch (hest_disable) {
case HEST_NOT_FOUND:
- return -ENODEV;
+ return;
case HEST_DISABLED:
pr_info(GHES_PFX "HEST is not enabled!\n");
- return -EINVAL;
+ return;
default:
break;
}
if (ghes_disable) {
pr_info(GHES_PFX "GHES is not enabled!\n");
- return -EINVAL;
+ return;
}
ghes_nmi_init_cxt();
rc = platform_driver_register(&ghes_platform_driver);
if (rc)
- goto err;
+ return;
rc = apei_osc_setup();
if (rc == 0 && osc_sb_apei_support_acked)
@@ -1494,9 +1496,4 @@ static int __init ghes_init(void)
pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
else
pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
-
- return 0;
-err:
- return rc;
}
-device_initcall(ghes_init);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 0edc1ed47673..6aef1ee5e1bd 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -224,7 +224,7 @@ err:
static int __init setup_hest_disable(char *str)
{
hest_disable = HEST_DISABLED;
- return 0;
+ return 1;
}
__setup("hest_disable", setup_hest_disable);
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
index 6dba187f4f2e..d4a72835f328 100644
--- a/drivers/acpi/arm64/Kconfig
+++ b/drivers/acpi/arm64/Kconfig
@@ -8,3 +8,13 @@ config ACPI_IORT
config ACPI_GTDT
bool
+
+config ACPI_AGDI
+ bool "Arm Generic Diagnostic Dump and Reset Device Interface"
+ depends on ARM_SDE_INTERFACE
+ help
+ Arm Generic Diagnostic Dump and Reset Device Interface (AGDI) is
+ a standard that enables issuing a non-maskable diagnostic dump and
+ reset command.
+
+ If set, the kernel parses the AGDI table and listens for the command.
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 66acbe77f46e..7b9e4045659d 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ACPI_AGDI) += agdi.o
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-y += dma.o
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
new file mode 100644
index 000000000000..4df337d545b7
--- /dev/null
+++ b/drivers/acpi/arm64/agdi.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file implements handling of
+ * Arm Generic Diagnostic Dump and Reset Interface table (AGDI)
+ *
+ * Copyright (c) 2022, Ampere Computing LLC
+ */
+
+#define pr_fmt(fmt) "ACPI: AGDI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+struct agdi_data {
+ int sdei_event;
+};
+
+static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg)
+{
+ nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued");
+ return 0;
+}
+
+static int agdi_sdei_probe(struct platform_device *pdev,
+ struct agdi_data *adata)
+{
+ int err;
+
+ err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register for SDEI event %d",
+ adata->sdei_event);
+ return err;
+ }
+
+ err = sdei_event_enable(adata->sdei_event);
+ if (err) {
+ sdei_event_unregister(adata->sdei_event);
+ dev_err(&pdev->dev, "Failed to enable event %d\n",
+ adata->sdei_event);
+ return err;
+ }
+
+ return 0;
+}
+
+static int agdi_probe(struct platform_device *pdev)
+{
+ struct agdi_data *adata = dev_get_platdata(&pdev->dev);
+
+ if (!adata)
+ return -EINVAL;
+
+ return agdi_sdei_probe(pdev, adata);
+}
+
+static int agdi_remove(struct platform_device *pdev)
+{
+ struct agdi_data *adata = dev_get_platdata(&pdev->dev);
+ int err, i;
+
+ err = sdei_event_disable(adata->sdei_event);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = sdei_event_unregister(adata->sdei_event);
+ if (err != -EINPROGRESS)
+ break;
+
+ schedule();
+ }
+
+ return err;
+}
+
+static struct platform_driver agdi_driver = {
+ .driver = {
+ .name = "agdi",
+ },
+ .probe = agdi_probe,
+ .remove = agdi_remove,
+};
+
+void __init acpi_agdi_init(void)
+{
+ struct acpi_table_agdi *agdi_table;
+ struct agdi_data pdata;
+ struct platform_device *pdev;
+ acpi_status status;
+
+ status = acpi_get_table(ACPI_SIG_AGDI, 0,
+ (struct acpi_table_header **) &agdi_table);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) {
+ pr_warn("Interrupt signaling is not supported");
+ goto err_put_table;
+ }
+
+ pdata.sdei_event = agdi_table->sdei_event;
+
+ pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata));
+ if (IS_ERR(pdev))
+ goto err_put_table;
+
+ if (platform_driver_register(&agdi_driver))
+ platform_device_unregister(pdev);
+
+err_put_table:
+ acpi_put_table((struct acpi_table_header *)agdi_table);
+}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 3b23fb775ac4..f2f8f05662de 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
- res[1].start = pmcg->page1_base_address;
- res[1].end = pmcg->page1_base_address + SZ_4K - 1;
- res[1].flags = IORESOURCE_MEM;
+ /*
+ * The initial version in DEN0049C lacked a way to describe register
+ * page 1, which makes it broken for most PMCG implementations; in
+ * that case, just let the driver fail gracefully if it expects to
+ * find a second memory resource.
+ */
+ if (node->revision > 0) {
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+ }
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ea31ae01458b..dc208f5f5a1f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -59,6 +59,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
+
+ /* Microsoft Surface Go 3 */
+ {"MSHW0146", 0},
+
{"", 0},
};
@@ -1148,6 +1152,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
},
},
+ {
+ /* Microsoft Surface Go 3 */
+ .callback = battery_notification_delay_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 07f604832fd6..3e58b613a2c4 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -26,6 +26,7 @@
#include <asm/mpspec.h>
#include <linux/dmi.h>
#endif
+#include <linux/acpi_agdi.h>
#include <linux/acpi_iort.h>
#include <linux/acpi_viot.h>
#include <linux/pci.h>
@@ -283,6 +284,8 @@ EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
bool osc_sb_native_usb4_support_confirmed;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
+bool osc_sb_cppc_not_supported;
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_negotiate_platform_control(void)
{
@@ -332,21 +335,38 @@ static void acpi_bus_osc_negotiate_platform_control(void)
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
- kfree(context.ret.pointer);
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length <= OSC_SUPPORT_DWORD) {
+ kfree(context.ret.pointer);
+ return;
+ }
+
+#ifdef CONFIG_X86
+ if (boot_cpu_has(X86_FEATURE_HWP))
+ osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
+ (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
+#endif
- /* Now run _OSC again with query flag clear */
+ /*
+ * Now run _OSC again with query flag clear and with the caps
+ * supported by both the OS and the platform.
+ */
capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+ kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- osc_sb_native_usb4_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+ }
kfree(context.ret.pointer);
}
@@ -1043,7 +1063,12 @@ struct bus_type acpi_bus_type = {
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
-EXPORT_SYMBOL_GPL(acpi_bus_type);
+
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data)
+{
+ return bus_for_each_dev(&acpi_bus_type, NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_for_each_dev);
/* --------------------------------------------------------------------------
Initialization/Cleanup
@@ -1331,6 +1356,8 @@ static int __init acpi_init(void)
pci_mmcfg_late_init();
acpi_iort_init();
+ acpi_hest_init();
+ acpi_ghes_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
@@ -1339,6 +1366,7 @@ static int __init acpi_init(void)
acpi_debugger_init();
acpi_setup_sb_notify_handler();
acpi_viot_init();
+ acpi_agdi_init();
return 0;
}
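
The reworked negotiation above runs _OSC twice: a query pass advertising everything the OS supports, then a commit pass that feeds back only the capabilities the platform acknowledged, with each pass guarded against a short reply buffer. A toy model of that handshake (plain C, not kernel code; the DWORD indices follow the usual _OSC buffer layout but are defined here only for illustration):

    #include <stdint.h>

    #define OSC_QUERY_DWORD    0
    #define OSC_SUPPORT_DWORD  1
    #define OSC_QUERY_ENABLE   0x1

    /* 'granted' stands in for the support DWORD the firmware returns. */
    static void osc_negotiate(uint32_t capbuf[3], uint32_t granted)
    {
            capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;  /* pass 1: query only */
            /* ... evaluate _OSC, read 'granted' from the reply ... */

            capbuf[OSC_QUERY_DWORD] = 0;                 /* pass 2: commit */
            capbuf[OSC_SUPPORT_DWORD] = granted;         /* caps both sides support */
            /* ... evaluate _OSC again with the narrowed capability set ... */
    }
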
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 866560cbb082..d418449194ee 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -633,8 +633,8 @@ static bool is_cppc_supported(int revision, int num_ent)
* )
*/
-#ifndef init_freq_invariance_cppc
-static inline void init_freq_invariance_cppc(void) { }
+#ifndef arch_init_invariance_cppc
+static inline void arch_init_invariance_cppc(void) { }
#endif
/**
@@ -656,6 +656,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -EFAULT;
+ if (osc_sb_cppc_not_supported)
+ return -ENODEV;
+
/* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
@@ -816,7 +819,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- init_freq_invariance_cppc();
+ arch_init_invariance_cppc();
kfree(output.pointer);
return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0077d2c85df8..a1b871a418f8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -168,7 +168,7 @@ struct acpi_ec_query {
};
static int acpi_ec_submit_query(struct acpi_ec *ec);
-static bool advance_transaction(struct acpi_ec *ec, bool interrupt);
+static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
struct acpi_ec *first_ec;
@@ -441,36 +441,35 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
return true;
}
-static bool acpi_ec_submit_event(struct acpi_ec *ec)
+static void acpi_ec_submit_event(struct acpi_ec *ec)
{
+ /*
+ * It is safe to mask the events here, because acpi_ec_close_event()
+ * will run at least once after this.
+ */
acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
- return false;
+ return;
- if (ec->event_state == EC_EVENT_READY) {
- ec_dbg_evt("Command(%s) submitted/blocked",
- acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+ if (ec->event_state != EC_EVENT_READY)
+ return;
- ec->event_state = EC_EVENT_IN_PROGRESS;
- /*
- * If events_to_process is greqter than 0 at this point, the
- * while () loop in acpi_ec_event_handler() is still running
- * and incrementing events_to_process will cause it to invoke
- * acpi_ec_submit_query() once more, so it is not necessary to
- * queue up the event work to start the same loop again.
- */
- if (ec->events_to_process++ > 0)
- return true;
-
- ec->events_in_progress++;
- return queue_work(ec_wq, &ec->work);
- }
+ ec_dbg_evt("Command(%s) submitted/blocked",
+ acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+ ec->event_state = EC_EVENT_IN_PROGRESS;
/*
- * The event handling work has not been completed yet, so it needs to be
- * flushed.
+ * If events_to_process is greater than 0 at this point, the while ()
+ * loop in acpi_ec_event_handler() is still running and incrementing
+ * events_to_process will cause it to invoke acpi_ec_submit_query() once
+ * more, so it is not necessary to queue up the event work to start the
+ * same loop again.
*/
- return true;
+ if (ec->events_to_process++ > 0)
+ return;
+
+ ec->events_in_progress++;
+ queue_work(ec_wq, &ec->work);
}
static void acpi_ec_complete_event(struct acpi_ec *ec)
@@ -655,11 +654,10 @@ static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t
acpi_ec_mask_events(ec);
}
-static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
+static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{
struct transaction *t = ec->curr;
bool wakeup = false;
- bool ret = false;
u8 status;
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
@@ -724,12 +722,10 @@ static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
out:
if (status & ACPI_EC_FLAG_SCI)
- ret = acpi_ec_submit_event(ec);
+ acpi_ec_submit_event(ec);
if (wakeup && interrupt)
wake_up(&ec->wait);
-
- return ret;
}
static void start_transaction(struct acpi_ec *ec)
@@ -1242,6 +1238,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
acpi_ec_submit_query(ec);
spin_lock_irq(&ec->lock);
+
ec->events_to_process--;
}
@@ -1250,27 +1247,30 @@ static void acpi_ec_event_handler(struct work_struct *work)
* event handling work again regardless of whether or not the query
* queued up above is processed successfully.
*/
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT)
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
+ bool guard_timeout;
+
acpi_ec_complete_event(ec);
- else
- acpi_ec_close_event(ec);
- spin_unlock_irq(&ec->lock);
+ ec_dbg_evt("Event stopped");
- ec_dbg_evt("Event stopped");
+ spin_unlock_irq(&ec->lock);
+
+ guard_timeout = !!ec_guard(ec);
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec_guard(ec)) {
spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
- if (!ec->curr)
+ if (guard_timeout && !ec->curr)
advance_transaction(ec, false);
+ } else {
+ acpi_ec_close_event(ec);
- spin_unlock_irq(&ec->lock);
+ ec_dbg_evt("Event stopped");
}
- spin_lock_irq(&ec->lock);
ec->events_in_progress--;
+
spin_unlock_irq(&ec->lock);
}
@@ -2051,6 +2051,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
+static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
+{
+ return ec->events_in_progress + ec->queries_in_progress > 0;
+}
+
bool acpi_ec_dispatch_gpe(void)
{
bool work_in_progress = false;
@@ -2066,13 +2071,27 @@ bool acpi_ec_dispatch_gpe(void)
return true;
/*
+ * Cancel the SCI wakeup and process all pending events in case there
+ * are any wakeup ones in there.
+ *
+ * Note that if any non-EC GPEs are active at this point, the SCI will
+ * retrigger after the rearming in acpi_s2idle_wake(), so no events
+ * should be missed by canceling the wakeup here.
+ */
+ pm_system_cancel_wakeup();
+
+ /*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
spin_lock_irq(&first_ec->lock);
- if (acpi_ec_gpe_status_set(first_ec))
- work_in_progress = advance_transaction(first_ec, false);
+ if (acpi_ec_gpe_status_set(first_ec)) {
+ pm_pr_dbg("ACPI EC GPE status set\n");
+
+ advance_transaction(first_ec, false);
+ work_in_progress = acpi_ec_work_in_progress(first_ec);
+ }
spin_unlock_irq(&first_ec->lock);
@@ -2089,8 +2108,7 @@ bool acpi_ec_dispatch_gpe(void)
spin_lock_irq(&first_ec->lock);
- work_in_progress = first_ec->events_in_progress +
- first_ec->queries_in_progress > 0;
+ work_in_progress = acpi_ec_work_in_progress(first_ec);
spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
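
acpi_ec_dispatch_gpe() now cancels the SCI wakeup up front, dispatches the transaction, and then keeps flushing EC work until nothing is in flight or a fresh wakeup arrives. A toy model of that drain loop (standalone C; locking and the real work queues are elided):

    #include <stdbool.h>
    #include <stdio.h>

    struct ec_model {
            int events_in_progress;
            int queries_in_progress;
    };

    static bool ec_work_in_progress(const struct ec_model *ec)
    {
            return ec->events_in_progress + ec->queries_in_progress > 0;
    }

    int main(void)
    {
            struct ec_model ec = { .events_in_progress = 2, .queries_in_progress = 1 };
            bool wakeup_pending = false;   /* stand-in for pm_wakeup_pending() */

            while (ec_work_in_progress(&ec) && !wakeup_pending) {
                    /* stand-in for flushing one queued work item */
                    if (ec.events_in_progress)
                            ec.events_in_progress--;
                    else
                            ec.queries_in_progress--;
            }

            printf("drained: %d events, %d queries left\n",
                   ec.events_in_progress, ec.queries_in_progress);
            return 0;
    }
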
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index dd9bb8ca2244..44728529a5b6 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -6,9 +6,53 @@
*
* Add new device IDs before the generic ACPI fan one.
*/
+
+#ifndef _ACPI_FAN_H_
+#define _ACPI_FAN_H_
+
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
+
+#define ACPI_FPS_NAME_LEN 20
+
+struct acpi_fan_fps {
+ u64 control;
+ u64 trip_point;
+ u64 speed;
+ u64 noise_level;
+ u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
+};
+
+struct acpi_fan_fif {
+ u8 revision;
+ u8 fine_grain_ctrl;
+ u8 step_size;
+ u8 low_speed_notification;
+};
+
+struct acpi_fan_fst {
+ u64 revision;
+ u64 control;
+ u64 speed;
+};
+
+struct acpi_fan {
+ bool acpi4;
+ struct acpi_fan_fif fif;
+ struct acpi_fan_fps *fps;
+ int fps_count;
+ struct thermal_cooling_device *cdev;
+ struct device_attribute fst_speed;
+ struct device_attribute fine_grain_control;
+};
+
+int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+int acpi_fan_create_attributes(struct acpi_device *device);
+void acpi_fan_delete_attributes(struct acpi_device *device);
+#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
new file mode 100644
index 000000000000..f15157d40713
--- /dev/null
+++ b/drivers/acpi/fan_attr.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fan_attr.c - Create extra attributes for ACPI Fan driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include "fan.h"
+
+MODULE_LICENSE("GPL");
+
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+ if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
+
+ if (fps->speed == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
+
+ if (fps->noise_level == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
+
+ if (fps->power == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
+
+ return count;
+}
+
+static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
+ struct acpi_fan_fst fst;
+ int status;
+
+ status = acpi_fan_get_fst(acpi_dev, &fst);
+ if (status)
+ return status;
+
+ return sprintf(buf, "%lld\n", fst.speed);
+}
+
+static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
+ struct acpi_fan *fan = acpi_driver_data(acpi_dev);
+
+ return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+}
+
+int acpi_fan_create_attributes(struct acpi_device *device)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ int i, status;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ return status;
+
+ /* _FST is present if we are here */
+ sysfs_attr_init(&fan->fst_speed.attr);
+ fan->fst_speed.show = show_fan_speed;
+ fan->fst_speed.store = NULL;
+ fan->fst_speed.attr.name = "fan_speed_rpm";
+ fan->fst_speed.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
+ if (status)
+ goto rem_fine_grain_attr;
+
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ sysfs_attr_init(&fps->dev_attr.attr);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ goto rem_fst_attr;
+ }
+ }
+
+ return 0;
+
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+rem_fine_grain_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+
+ return status;
+}
+
+void acpi_fan_delete_attributes(struct acpi_device *device)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+ sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+}
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan_core.c
index 5cd0ceb50bc8..b9a9a59ddcc1 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan_core.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $)
+ * fan_core.c - ACPI Fan core Driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -45,33 +46,6 @@ static const struct dev_pm_ops acpi_fan_pm = {
#define FAN_PM_OPS_PTR NULL
#endif
-#define ACPI_FPS_NAME_LEN 20
-
-struct acpi_fan_fps {
- u64 control;
- u64 trip_point;
- u64 speed;
- u64 noise_level;
- u64 power;
- char name[ACPI_FPS_NAME_LEN];
- struct device_attribute dev_attr;
-};
-
-struct acpi_fan_fif {
- u64 revision;
- u64 fine_grain_ctrl;
- u64 step_size;
- u64 low_speed_notification;
-};
-
-struct acpi_fan {
- bool acpi4;
- struct acpi_fan_fif fif;
- struct acpi_fan_fps *fps;
- int fps_count;
- struct thermal_cooling_device *cdev;
-};
-
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
.remove = acpi_fan_remove,
@@ -89,25 +63,29 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
struct acpi_device *device = cdev->devdata;
struct acpi_fan *fan = acpi_driver_data(device);
- if (fan->acpi4)
- *state = fan->fps_count - 1;
- else
+ if (fan->acpi4) {
+ if (fan->fif.fine_grain_ctrl)
+ *state = 100 / fan->fif.step_size;
+ else
+ *state = fan->fps_count - 1;
+ } else {
*state = 1;
+ }
+
return 0;
}
-static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
+int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_fan *fan = acpi_driver_data(device);
union acpi_object *obj;
acpi_status status;
- int control, i;
+ int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
- return status;
+ return -ENODEV;
}
obj = buffer.pointer;
@@ -115,35 +93,52 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
- status = -EINVAL;
+ ret = -EINVAL;
goto err;
}
- control = obj->package.elements[1].integer.value;
+ fst->revision = obj->package.elements[0].integer.value;
+ fst->control = obj->package.elements[1].integer.value;
+ fst->speed = obj->package.elements[2].integer.value;
+
+err:
+ kfree(obj);
+ return ret;
+}
+
+static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ struct acpi_fan_fst fst;
+ int status, i;
+
+ status = acpi_fan_get_fst(device, &fst);
+ if (status)
+ return status;
+
+ if (fan->fif.fine_grain_ctrl) {
+ /* Per the spec, this control value should match what was set via _FSL */
+ if (fst.control > 100) {
+ dev_dbg(&device->dev, "Invalid control value returned\n");
+ goto match_fps;
+ }
+
+ *state = (int) fst.control / fan->fif.step_size;
+ return 0;
+ }
+
+match_fps:
for (i = 0; i < fan->fps_count; i++) {
- /*
- * When Fine Grain Control is set, return the state
- * corresponding to maximum fan->fps[i].control
- * value compared to the current speed. Here the
- * fan->fps[] is sorted array with increasing speed.
- */
- if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
- i = (i > 0) ? i - 1 : 0;
+ if (fst.control == fan->fps[i].control)
break;
- } else if (control == fan->fps[i].control) {
- break;
- }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
- status = -EINVAL;
- goto err;
+ return -EINVAL;
}
*state = i;
-err:
- kfree(obj);
return status;
}
@@ -187,15 +182,30 @@ static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state)
{
struct acpi_fan *fan = acpi_driver_data(device);
acpi_status status;
+ u64 value = state;
+ int max_state;
- if (state >= fan->fps_count)
+ if (fan->fif.fine_grain_ctrl)
+ max_state = 100 / fan->fif.step_size;
+ else
+ max_state = fan->fps_count - 1;
+
+ if (state > max_state)
return -EINVAL;
- status = acpi_execute_simple_method(device->handle, "_FSL",
- fan->fps[state].control);
+ if (fan->fif.fine_grain_ctrl) {
+ value *= fan->fif.step_size;
+ /* The spec only allows compensating the last step */
+ if (value + fan->fif.step_size > 100)
+ value = 100;
+ } else {
+ value = fan->fps[state].control;
+ }
+
+ status = acpi_execute_simple_method(device->handle, "_FSL", value);
if (ACPI_FAILURE(status)) {
dev_dbg(&device->dev, "Failed to set state by _FSL\n");
- return status;
+ return -ENODEV;
}
return 0;
@@ -237,7 +247,8 @@ static int acpi_fan_get_fif(struct acpi_device *device)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_buffer format = { sizeof("NNNN"), "NNNN" };
- struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif };
+ u64 fields[4];
+ struct acpi_buffer fif = { sizeof(fields), fields };
union acpi_object *obj;
acpi_status status;
@@ -258,6 +269,17 @@ static int acpi_fan_get_fif(struct acpi_device *device)
status = -EINVAL;
}
+ fan->fif.revision = fields[0];
+ fan->fif.fine_grain_ctrl = fields[1];
+ fan->fif.step_size = fields[2];
+ fan->fif.low_speed_notification = fields[3];
+
+ /* Guard against a buggy step size of 0 by treating it as 1 */
+ if (!fan->fif.step_size)
+ fan->fif.step_size = 1;
+ /* Clamp the step size to 9; the spec only allows values 1-9 */
+ else if (fan->fif.step_size > 9)
+ fan->fif.step_size = 9;
err:
kfree(obj);
return status;
@@ -270,39 +292,6 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
return fps1->speed - fps2->speed;
}
-static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
- int count;
-
- if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
- else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
-
- if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
-
- if (fps->speed == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
-
- if (fps->noise_level == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
-
- if (fps->power == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
-
- return count;
-}
-
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
@@ -347,25 +336,6 @@ static int acpi_fan_get_fps(struct acpi_device *device)
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
- for (i = 0; i < fan->fps_count; ++i) {
- struct acpi_fan_fps *fps = &fan->fps[i];
-
- snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
- sysfs_attr_init(&fps->dev_attr.attr);
- fps->dev_attr.show = show_state;
- fps->dev_attr.store = NULL;
- fps->dev_attr.attr.name = fps->name;
- fps->dev_attr.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
- if (status) {
- int j;
-
- for (j = 0; j < i; ++j)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- break;
- }
- }
-
err:
kfree(obj);
return status;
@@ -396,6 +366,10 @@ static int acpi_fan_probe(struct platform_device *pdev)
if (result)
return result;
+ result = acpi_fan_create_attributes(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
@@ -437,12 +411,8 @@ static int acpi_fan_probe(struct platform_device *pdev)
return 0;
err_end:
- if (fan->acpi4) {
- int i;
-
- for (i = 0; i < fan->fps_count; ++i)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- }
+ if (fan->acpi4)
+ acpi_fan_delete_attributes(device);
return result;
}
@@ -453,10 +423,8 @@ static int acpi_fan_remove(struct platform_device *pdev)
if (fan->acpi4) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- int i;
- for (i = 0; i < fan->fps_count; ++i)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ acpi_fan_delete_attributes(device);
}
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
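
With fine-grain control the cooling state N now maps to N * step_size percent, the step size is clamped to the spec's 1-9 range, and only the last step may be rounded up to 100 so full speed stays reachable. A standalone sketch of that mapping (plain C, mirroring the _FIF fixups and the _FSL value computation above):

    #include <stdio.h>

    static unsigned int fan_state_to_percent(unsigned int state, unsigned int step_size)
    {
            unsigned int value;

            /* Mirror the _FIF sanity fixups: 0 -> 1, >9 -> 9 */
            if (step_size == 0)
                    step_size = 1;
            else if (step_size > 9)
                    step_size = 9;

            value = state * step_size;
            /* Only the last step is rounded up to 100 */
            if (value + step_size > 100)
                    value = 100;

            return value;
    }

    int main(void)
    {
            /* step_size 7: the maximum state is 100 / 7 = 14, which maps to 100% */
            for (unsigned int state = 0; state <= 100 / 7; state++)
                    printf("state %u -> %u%%\n", state, fan_state_to_percent(state, 7));
            return 0;
    }
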
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 457e11d851b8..628bf8f18130 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -96,8 +96,6 @@ void acpi_scan_table_notify(void);
extern struct list_head acpi_bus_id_list;
-#define ACPI_MAX_DEVICE_INSTANCES 4096
-
struct acpi_device_bus_id {
const char *bus_id;
struct ida instance_ida;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45c5c0e45e33..7a70c4bfc23c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -642,22 +642,24 @@ u64 acpi_os_get_timer(void)
(ACPI_100NSEC_PER_SEC / HZ);
}
-acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
+acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
- if (!value)
+ if (value)
+ *value = 0;
+ else
value = &dummy;
- *value = 0;
if (width <= 8) {
- *(u8 *) value = inb(port);
+ *value = inb(port);
} else if (width <= 16) {
- *(u16 *) value = inw(port);
+ *value = inw(port);
} else if (width <= 32) {
- *(u32 *) value = inl(port);
+ *value = inl(port);
} else {
- BUG();
+ pr_debug("%s: Access width %d not supported\n", __func__, width);
+ return AE_BAD_PARAMETER;
}
return AE_OK;
@@ -674,7 +676,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
} else if (width <= 32) {
outl(value, port);
} else {
- BUG();
+ pr_debug("%s: Access width %d not supported\n", __func__, width);
+ return AE_BAD_PARAMETER;
}
return AE_OK;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index d54fb8e54671..58647051c948 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -185,7 +185,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
if (!p || !p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
- * particularly those those w/ _STA disabled
+ * particularly those w/ _STA disabled
*/
pr_debug("Blank _CRS IRQ resource\n");
return AE_OK;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b76db99cced3..6f9e75d14808 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -22,8 +22,6 @@
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/apple.h>
-#include <acpi/apei.h> /* for acpi_hest_init() */
-
#include "internal.h"
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
@@ -943,7 +941,6 @@ out_release_info:
void __init acpi_pci_root_init(void)
{
- acpi_hest_init();
if (acpi_pci_disabled)
return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 86560a28751b..32b20efff5f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
+ /* T40 can not handle C3 idle state */
+ { set_max_cstate, "IBM ThinkPad T40", {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+ (void *)2},
{},
};
@@ -1075,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
return 0;
}
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
@@ -1083,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ /* make sure our architecture has support */
+ ret = acpi_processor_ffh_lpi_probe(pr->id);
+ if (ret == -EOPNOTSUPP)
+ return ret;
+
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
@@ -1134,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
return 0;
}
-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
-{
- return -ENODEV;
-}
-
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;
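
Making the __weak stub return -EOPNOTSUPP lets acpi_processor_get_lpi_info() bail out before the _OSC LPI check on architectures that never override the probe hook. A minimal sketch of the weak-default / strong-override pattern, using hypothetical names and files:

    /* generic.c: default used when no architecture provides its own */
    int __weak example_ffh_probe(unsigned int cpu)
    {
            return -EOPNOTSUPP;
    }

    /* arch.c: a strong definition in another object file wins at link time */
    int example_ffh_probe(unsigned int cpu)
    {
            return 0;       /* feature supported on this architecture */
    }
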
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index d0986bda2964..12bbfe833609 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -541,7 +541,8 @@ acpi_device_data_of_node(const struct fwnode_handle *fwnode)
if (is_acpi_device_node(fwnode)) {
const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
- } else if (is_acpi_data_node(fwnode)) {
+ }
+ if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
@@ -685,7 +686,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
*/
if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
if (index)
- return -EINVAL;
+ return -ENOENT;
device = acpi_fetch_acpi_dev(obj->reference.handle);
if (!device)
@@ -739,14 +740,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
}
- /* assume following integer elements are all args */
+ /*
+ * Assume the following integer elements are all args.
+ * Stop counting on the first reference or end of the
+ * package arguments. If an element is neither a reference
+ * nor an integer, return an error; we cannot parse it.
+ */
for (i = 0; element + i < end && i < num_args; i++) {
int type = element[i].type;
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
if (type == ACPI_TYPE_INTEGER)
nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
else
return -EINVAL;
}
@@ -950,7 +956,7 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
if (proptype != DEV_PROP_STRING && nval > obj->package.count)
return -EOVERFLOW;
- else if (nval <= 0)
+ if (nval == 0)
return -EINVAL;
items = obj->package.elements;
@@ -1012,14 +1018,10 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct list_head *head;
struct list_head *next;
- if (!child || is_acpi_device_node(child)) {
+ if ((!child || is_acpi_device_node(child)) && adev) {
struct acpi_device *child_adev;
- if (adev)
- head = &adev->children;
- else
- goto nondev;
-
+ head = &adev->children;
if (list_empty(head))
goto nondev;
@@ -1089,7 +1091,8 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode)
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
- } else if (is_acpi_device_node(fwnode)) {
+ }
+ if (is_acpi_device_node(fwnode)) {
struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
if (dev)
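Two behavioural points in the property.c hunks are easy to miss: requesting a non-zero index on a plain single reference now yields -ENOENT rather than -EINVAL, and argument counting stops at the next reference instead of treating a reference as the only terminator. The stand-alone model below illustrates the counting rule with simplified stand-in types; it is not the ACPICA object layout.

/* Userspace illustration of the argument-counting rule; types are stand-ins. */
#include <stdio.h>

enum elem_type { T_REFERENCE, T_INTEGER, T_STRING };

static int count_args(const enum elem_type *elem, int remaining, int num_args)
{
	int nargs = 0;

	for (int i = 0; i < remaining && i < num_args; i++) {
		if (elem[i] == T_REFERENCE)
			break;		/* next reference starts a new entry */
		if (elem[i] == T_INTEGER)
			nargs++;	/* argument of the current reference */
		else
			return -1;	/* anything else cannot be parsed */
	}
	return nargs;
}

int main(void)
{
	enum elem_type pkg[] = { T_INTEGER, T_INTEGER, T_REFERENCE, T_INTEGER };

	printf("nargs = %d\n", count_args(pkg, 4, 8));	/* prints 2 */
	return 0;
}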
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1331756d4cfc..5ffd87ac42b3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -477,7 +477,8 @@ static void acpi_device_del(struct acpi_device *device)
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
acpi_device_hid(device))) {
- ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+ ida_free(&acpi_device_bus_id->instance_ida,
+ device->pnp.instance_no);
if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
@@ -642,7 +643,7 @@ static int acpi_device_set_name(struct acpi_device *device,
struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
int result;
- result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+ result = ida_alloc(instance_ida, GFP_KERNEL);
if (result < 0)
return result;
@@ -1377,11 +1378,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_HID) {
acpi_add_id(pnp, info->hardware_id.string);
pnp->type.platform_id = 1;
- if (info->valid & ACPI_VALID_CID) {
- cid_list = &info->compatible_id_list;
- for (i = 0; i < cid_list->count; i++)
- acpi_add_id(pnp, cid_list->ids[i].string);
- }
+ }
+ if (info->valid & ACPI_VALID_CID) {
+ cid_list = &info->compatible_id_list;
+ for (i = 0; i < cid_list->count; i++)
+ acpi_add_id(pnp, cid_list->ids[i].string);
}
if (info->valid & ACPI_VALID_ADR) {
pnp->bus_address = info->address;
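The scan.c hunks replace the deprecated ida_simple_get()/ida_simple_remove() pair with ida_alloc()/ida_free(). A minimal kernel-style sketch of that pairing follows; the "example_" names are invented, and note that ida_alloc() has no upper bound, so any range limit (such as ACPI_MAX_DEVICE_INSTANCES) has to be enforced by the caller or via ida_alloc_max().

/* Hedged sketch of the ida_alloc()/ida_free() pairing; names are made up. */
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_instance_ida);

static int example_get_instance_no(void)
{
	/* Allocates the smallest free ID, starting at 0. */
	return ida_alloc(&example_instance_ida, GFP_KERNEL);
}

static void example_put_instance_no(int id)
{
	if (id >= 0)
		ida_free(&example_instance_ida, id);
}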
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a60ff5dfed3a..c992e57b2c79 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void)
return true;
}
- /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ /*
+ * Check non-EC GPE wakeups and if there are none, cancel the
+ * SCI-related wakeup and dispatch the EC GPE.
+ */
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}
- /*
- * Cancel the SCI wakeup and process all pending events in case
- * there are any wakeup ones in there.
- *
- * Note that if any non-EC GPEs are active at this point, the
- * SCI will retrigger after the rearming below, so no events
- * should be missed by canceling the wakeup here.
- */
- pm_system_cancel_wakeup();
acpi_os_wait_events_complete();
/*
@@ -764,6 +758,9 @@ bool acpi_s2idle_wake(void)
return true;
}
+ pm_pr_dbg("Rearming ACPI SCI for wakeup\n");
+
+ pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
@@ -874,12 +871,7 @@ static inline void acpi_sleep_syscore_init(void) {}
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
-static int sigcheck = -1; /* Default behaviour is just to warn */
-
-void __init acpi_check_s4_hw_signature(int check)
-{
- sigcheck = check;
-}
+int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
static int acpi_hibernation_begin(pm_message_t stage)
{
@@ -1004,7 +996,7 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
- if (!sigcheck)
+ if (!acpi_check_s4_hw_signature)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
@@ -1016,7 +1008,7 @@ static void acpi_sleep_hibernate_setup(void)
*/
s4_hardware_signature = facs->hardware_signature;
- if (sigcheck > 0) {
+ if (acpi_check_s4_hw_signature > 0) {
/*
* If we're actually obeying the ACPI specification
* then the signature is written out as part of the
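With the sleep.c change the sigcheck setter function is gone and the policy lives in the exported acpi_check_s4_hw_signature variable: -1 keeps the warn-only default, 0 skips the FACS hardware signature check entirely, and a positive value enforces it. The sketch below shows how a caller might set it from an option string; the option name and setup hook are hypothetical, only the variable itself comes from the patch.

/* Hypothetical option parser; only acpi_check_s4_hw_signature is real. */
#include <linux/init.h>
#include <linux/string.h>

extern int acpi_check_s4_hw_signature;	/* -1 warn, 0 ignore, >0 enforce */

static int __init example_hwsig_setup(char *arg)
{
	if (!strcmp(arg, "ignore"))
		acpi_check_s4_hw_signature = 0;
	else if (!strcmp(arg, "enforce"))
		acpi_check_s4_hw_signature = 1;
	return 1;
}
__setup("example_hwsig=", example_hwsig_setup);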
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 0741a4933f62..ceee808f7f2a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(
acpi_get_table(id, instance, &table_header);
if (!table_header) {
- pr_warn("%4.4s not present\n", id);
+ pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
@@ -545,7 +545,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT, ACPI_SIG_AEST };
+ ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4f64713e9917..becc198e4c22 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -415,6 +415,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
+ /*
+ * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a
+ * working native backlight interface and an ACPI video interface.
+ * However, the default detection mechanism first registers the video
+ * interface and then unregisters it again while switching to the native
+ * interface during boot. For some reason this leaves behind a dangling
+ * SBIOS backlight-change request, causing the backlight to drop to ~2%
+ * once per boot on the first power cord connect or disconnect event.
+ * Setting the native interface explicitly avoids this buggy behaviour
+ * by skipping the unregistering step.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..ed889f827f53 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
- *
- * Only enable on !AMD as enabling this universally causes problems for a number
- * of AMD based systems.
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
*/
- if (!acpi_s2idle_vendor_amd())
- acpi_ec_mark_gpe_for_wake();
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ffdeed5334d6..664070fc8349 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -285,6 +285,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Lenovo Yoga Tablet 1050F/L */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Nextbook Ares 8 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e1a5eca3ae3c..d3bd14aaabf6 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -370,6 +370,7 @@ int amba_driver_register(struct amba_driver *drv)
return driver_register(&drv->drv);
}
+EXPORT_SYMBOL(amba_driver_register);
/**
* amba_driver_unregister - remove an AMBA device driver
@@ -383,7 +384,7 @@ void amba_driver_unregister(struct amba_driver *drv)
{
driver_unregister(&drv->drv);
}
-
+EXPORT_SYMBOL(amba_driver_unregister);
static void amba_device_release(struct device *dev)
{
@@ -642,6 +643,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
return amba_device_add(dev, parent);
}
+EXPORT_SYMBOL(amba_device_register);
/**
* amba_device_put - put an AMBA device
@@ -668,66 +670,7 @@ void amba_device_unregister(struct amba_device *dev)
{
device_unregister(&dev->dev);
}
-
-
-struct find_data {
- struct amba_device *dev;
- struct device *parent;
- const char *busid;
- unsigned int id;
- unsigned int mask;
-};
-
-static int amba_find_match(struct device *dev, void *data)
-{
- struct find_data *d = data;
- struct amba_device *pcdev = to_amba_device(dev);
- int r;
-
- r = (pcdev->periphid & d->mask) == d->id;
- if (d->parent)
- r &= d->parent == dev->parent;
- if (d->busid)
- r &= strcmp(dev_name(dev), d->busid) == 0;
-
- if (r) {
- get_device(dev);
- d->dev = pcdev;
- }
-
- return r;
-}
-
-/**
- * amba_find_device - locate an AMBA device given a bus id
- * @busid: bus id for device (or NULL)
- * @parent: parent device (or NULL)
- * @id: peripheral ID (or 0)
- * @mask: peripheral ID mask (or 0)
- *
- * Return the AMBA device corresponding to the supplied parameters.
- * If no device matches, returns NULL.
- *
- * NOTE: When a valid device is found, its refcount is
- * incremented, and must be decremented before the returned
- * reference.
- */
-struct amba_device *
-amba_find_device(const char *busid, struct device *parent, unsigned int id,
- unsigned int mask)
-{
- struct find_data data;
-
- data.dev = NULL;
- data.parent = parent;
- data.busid = busid;
- data.id = id;
- data.mask = mask;
-
- bus_for_each_dev(&amba_bustype, NULL, &data, amba_find_match);
-
- return data.dev;
-}
+EXPORT_SYMBOL(amba_device_unregister);
/**
* amba_request_regions - request all mem regions associated with device
@@ -749,6 +692,7 @@ int amba_request_regions(struct amba_device *dev, const char *name)
return ret;
}
+EXPORT_SYMBOL(amba_request_regions);
/**
* amba_release_regions - release mem regions associated with device
@@ -763,11 +707,4 @@ void amba_release_regions(struct amba_device *dev)
size = resource_size(&dev->res);
release_mem_region(dev->res.start, size);
}
-
-EXPORT_SYMBOL(amba_driver_register);
-EXPORT_SYMBOL(amba_driver_unregister);
-EXPORT_SYMBOL(amba_device_register);
-EXPORT_SYMBOL(amba_device_unregister);
-EXPORT_SYMBOL(amba_find_device);
-EXPORT_SYMBOL(amba_request_regions);
EXPORT_SYMBOL(amba_release_regions);
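Besides dropping the unused amba_find_device() helper, the bus.c hunk moves each EXPORT_SYMBOL() next to the function it exports instead of collecting them at the end of the file. For out-of-tree code that still needs a comparable lookup, a generic bus_find_device() walk over amba_bustype is one possible substitute; the sketch below is a hedged illustration under that assumption, not a recommendation made by the patch, and the "example_" names are invented.

/* Hedged sketch of a bus_find_device()-based replacement for the removed
 * amba_find_device(). */
#include <linux/amba/bus.h>
#include <linux/device.h>

static int example_match_periphid(struct device *dev, const void *data)
{
	const unsigned int *id = data;

	return to_amba_device(dev)->periphid == *id;
}

static struct amba_device *example_find_by_periphid(unsigned int id)
{
	struct device *dev = bus_find_device(&amba_bustype, NULL, &id,
					     example_match_periphid);

	/* The caller owns the reference taken by bus_find_device(). */
	return dev ? to_amba_device(dev) : NULL;
}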
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb54631fd950..e5641e6c52ee 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,14 +115,14 @@ config SATA_AHCI
If unsure, say N.
-config SATA_MOBILE_LPM_POLICY
- int "Default SATA Link Power Management policy for mobile chipsets"
+config SATA_LPM_POLICY
+ int "Default SATA Link Power Management policy for low power chipsets"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
- for mobile / laptop variants of chipsets / "South Bridges".
+ for chipsets / "South Bridges" designated as supporting low power.
The value set has the following meanings:
0 => Keep firmware settings
@@ -283,7 +283,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
- depends on ARCH_GEMINI || COMPILE_TEST
+ depends on ARCH_GEMINI || (OF && COMPILE_TEST)
select SATA_HOST
default ARCH_GEMINI
help
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 536d4cb8f08b..7654a40c12b4 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -265,7 +265,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_FAILED)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ab5811ef5a53..84456c05e845 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -50,7 +50,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
- board_ahci_mobile,
+ board_ahci_low_power,
board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
@@ -135,8 +135,8 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_mobile] = {
- AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ [board_ahci_low_power] = {
+ AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -275,13 +275,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -291,9 +291,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
@@ -316,9 +316,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -327,29 +327,29 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -381,26 +381,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
@@ -413,13 +413,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
{ PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
- { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
- { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
- { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
- { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
- { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -447,7 +447,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
- { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@@ -582,6 +582,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
+ .driver_data = board_ahci_no_debounce_delay },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
@@ -737,7 +739,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
@@ -806,7 +808,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
@@ -889,7 +891,8 @@ static int ahci_pci_device_suspend(struct device *dev)
}
ahci_pci_disable_interrupts(host);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int ahci_pci_device_resume(struct device *dev)
@@ -1592,11 +1595,11 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
static void ahci_update_initial_lpm_policy(struct ata_port *ap,
struct ahci_host_priv *hpriv)
{
- int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+ int policy = CONFIG_SATA_LPM_POLICY;
- /* Ignore processing for non mobile platforms */
- if (!(hpriv->flags & AHCI_HFLAG_IS_MOBILE))
+ /* Ignore processing for chipsets that don't use policy */
+ if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
return;
/* user modified policy via module param */
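The rename from board_ahci_mobile/AHCI_HFLAG_IS_MOBILE to board_ahci_low_power/AHCI_HFLAG_USE_LPM_POLICY makes it explicit that CONFIG_SATA_LPM_POLICY is only a build-time default for chipsets that opt in via the host flag. Roughly, the selection amounts to the sketch below, a simplified, hedged outline rather than a verbatim copy of ahci_update_initial_lpm_policy().

/* Simplified outline of applying the default LPM policy on opted-in ports. */
#include <linux/libata.h>
#include "ahci.h"	/* struct ahci_host_priv, AHCI_HFLAG_USE_LPM_POLICY */

static void example_apply_default_lpm(struct ata_port *ap,
				      struct ahci_host_priv *hpriv)
{
	int policy = CONFIG_SATA_LPM_POLICY;

	/* Boards without the flag keep the firmware-configured policy. */
	if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
		return;

	if (policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER)
		ap->target_lpm_policy = policy;
}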
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index eeac5482f1d1..5badbaca05a0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -235,8 +235,8 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
- AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
- SATA_MOBILE_LPM_POLICY
+ AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use
+ SATA_LPM_POLICY
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
suspend/resume */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 64dd8aa397d5..ab8552b1ff2a 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -427,7 +427,7 @@ static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
{.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index acf59f51b356..cb24ecf36faf 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -363,7 +363,7 @@ static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
static const struct of_device_id ceva_ahci_of_match[] = {
{ .compatible = "ceva,ahci-1v84" },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 0e8276600712..052c28e250aa 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -241,7 +241,7 @@ static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_da850_of_match[] = {
{ .compatible = "ti,da850-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 8bec41041671..8a92112dcd59 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -176,7 +176,7 @@ static SIMPLE_DEV_PM_OPS(ahci_dm816_pm_ops,
static const struct of_device_id ahci_dm816_of_match[] = {
{ .compatible = "ti,dm816-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 388baf528fa8..79aa9f285312 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -811,7 +811,7 @@ static const struct of_device_id imx_ahci_of_match[] = {
{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
{ .compatible = "fsl,imx6qp-ahci", .data = (void *)AHCI_IMX6QP },
{ .compatible = "fsl,imx8qm-ahci", .data = (void *)AHCI_IMX8QM },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index d9b08ae7c3b2..1f6c85fde983 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -169,7 +169,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "mediatek,mtk-ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 3ad46d26d9d5..991413a272e6 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -239,7 +239,7 @@ static const struct of_device_id ahci_mvebu_of_match[] = {
.compatible = "marvell,armada-3700-ahci",
.data = &ahci_mvebu_armada_3700_plat_data,
},
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index 5a44e089c6bb..b9460b91288f 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -80,7 +80,7 @@ static int ahci_octeon_remove(struct platform_device *pdev)
static const struct of_device_id octeon_ahci_match[] = {
{ .compatible = "cavium,octeon-7130-sata-uctl", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_ahci_match);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 3aab2e3d57f3..28a8de5b48b9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -88,7 +88,7 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
{ .compatible = "cavium,octeon-7130-ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index bf5b388bd4e0..6cd61842ad48 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -77,7 +77,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
{ .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -123,7 +123,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index c268264c2129..7526653c843b 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -232,7 +232,7 @@ static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_ahci_match);
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 56b695136977..c7273c1cb0c7 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -286,7 +286,7 @@ static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_sunxi_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ahci", },
{ .compatible = "allwinner,sun8i-r40-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 8e206379d699..7bb5db17f864 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link,
do {
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
@@ -726,7 +726,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
static const struct of_device_id xgene_ahci_of_match[] = {
{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 27b0d903f91f..ade5e894563b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -993,11 +993,8 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
unsigned long flags;
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
/* Some braindamaged ACPI suspend implementations expect the
* controller to be awake on entry; otherwise, it burns cpu
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0ed484e04fd6..cf8c7fd59ada 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1561,7 +1561,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, online,
@@ -2033,7 +2033,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_FAILED)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 18296443ccba..65227ef6b846 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -733,7 +733,8 @@ int ahci_platform_suspend_host(struct device *dev)
if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
ahci_platform_disable_phys(hpriv);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8cfa8c96bb13..3d345d173556 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -546,13 +546,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev,
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
- tf->feature = gtf->tf[0]; /* 0x1f1 */
+ tf->error = gtf->tf[0]; /* 0x1f1 */
tf->nsect = gtf->tf[1]; /* 0x1f2 */
tf->lbal = gtf->tf[2]; /* 0x1f3 */
tf->lbam = gtf->tf[3]; /* 0x1f4 */
tf->lbah = gtf->tf[4]; /* 0x1f5 */
tf->device = gtf->tf[5]; /* 0x1f6 */
- tf->command = gtf->tf[6]; /* 0x1f7 */
+ tf->status = gtf->tf[6]; /* 0x1f7 */
}
static int ata_acpi_filter_tf(struct ata_device *dev,
@@ -679,7 +679,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
"(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
- rtf.command, rtf.feature);
+ rtf.status, rtf.error);
rc = 0;
break;
@@ -689,7 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
"(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
- err_mask, rtf.command, rtf.feature);
+ err_mask, rtf.status, rtf.error);
rc = -EIO;
break;
}
@@ -800,27 +800,6 @@ static int ata_acpi_push_id(struct ata_device *dev)
}
/**
- * ata_acpi_on_suspend - ATA ACPI hook called on suspend
- * @ap: target ATA port
- *
- * This function is called when @ap is about to be suspended. All
- * devices are already put to sleep but the port_suspend() callback
- * hasn't been executed yet. Error return from this function aborts
- * suspend.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_acpi_on_suspend(struct ata_port *ap)
-{
- /* nada */
- return 0;
-}
-
-/**
* ata_acpi_on_resume - ATA ACPI hook called on resume
* @ap: target ATA port
*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 67f88027680a..cceedde51126 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1171,7 +1171,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
ata_dev_warn(dev,
"failed to read native max address (err_mask=0x%x)\n",
err_mask);
- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
return -EACCES;
return -EIO;
}
@@ -1235,7 +1235,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
"failed to set max address (err_mask=0x%x)\n",
err_mask);
if (err_mask == AC_ERR_DEV &&
- (tf.feature & (ATA_ABORTED | ATA_IDNF)))
+ (tf.error & (ATA_ABORTED | ATA_IDNF)))
return -EACCES;
return -EIO;
}
@@ -1584,7 +1584,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
/* perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_FAILED) {
- if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+ if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
if (!qc->err_mask)
@@ -1593,7 +1593,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
- qc->result_tf.command |= ATA_SENSE;
+ qc->result_tf.status |= ATA_SENSE;
}
/* finish up */
@@ -1813,7 +1813,7 @@ retry:
return 0;
}
- if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+ if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
/* Device or controller might have reported
* the wrong device class. Give a shot at the
* other IDENTIFY if the current one is
@@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
{
struct ata_port *ap = dev->link->ap;
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ return false;
+
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
return false;
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
@@ -2445,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev)
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
- if (!ata_identify_page_supported(dev,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ if (ata_id_major_version(dev->id) < 11 ||
+ !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
goto out;
/*
- * Read IDENTIFY DEVICE data log, page 0x47
- * (concurrent positioning ranges). We can have at most 255 32B range
- * descriptors plus a 64B header.
+ * Read the concurrent positioning ranges log (0x47). We can have at
+ * most 255 32B range descriptors plus a 64B header.
*/
buf_len = (64 + 255 * 32 + 511) & ~511;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
- err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES,
- buf, buf_len >> 9);
+ err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ 0, buf, buf_len >> 9);
if (err_mask)
goto out;
@@ -3568,7 +3569,7 @@ EXPORT_SYMBOL_GPL(ata_wait_after_reset);
* Kernel thread context (may sleep)
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * Always 0.
*/
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
@@ -4028,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4073,6 +4075,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ /*
+ * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
+ * log page is accessed. Ensure we never ask for this log page with
+ * these devices.
+ */
+ { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+
/* End Marker */
{ }
};
@@ -4375,7 +4384,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
/* A clean abort indicates an original or just out of spec drive
and we should continue as we issue the setup based on the
drive reported working geometry */
- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
err_mask = 0;
return err_mask;
@@ -5170,10 +5179,9 @@ EXPORT_SYMBOL_GPL(ata_sas_port_resume);
*
* Suspend @host. Actual operation is performed by port suspend.
*/
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
host->dev->power.power_state = mesg;
- return 0;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);
@@ -6090,11 +6098,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
ata_pci_device_do_suspend(pdev, mesg);
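ata_host_suspend() is reduced to recording the PM message and can no longer fail, so its return type becomes void and callers that used to propagate its result now simply return 0 themselves. A minimal sketch of the new calling convention follows; the callback name is hypothetical.

/* Hedged sketch of a host driver's suspend path after the change. */
#include <linux/device.h>
#include <linux/libata.h>
#include <linux/pm.h>

static int example_host_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_suspend(host, PMSG_SUSPEND);	/* void, cannot fail */
	return 0;
}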
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7951fd946bf9..3307ed45fe4d 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1386,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
if (err_mask == AC_ERR_DEV)
- *r_sense_key = tf.feature >> 4;
+ *r_sense_key = tf.error >> 4;
return err_mask;
}
@@ -1429,12 +1429,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc,
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/* Ignore err_mask; ATA_ERR might be set */
- if (tf.command & ATA_SENSE) {
+ if (tf.status & ATA_SENSE) {
ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
} else {
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
+ tf.status, err_mask);
}
}
@@ -1557,7 +1557,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
const struct ata_taskfile *tf)
{
unsigned int tmp, action = 0;
- u8 stat = tf->command, err = tf->feature;
+ u8 stat = tf->status, err = tf->error;
if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
qc->err_mask |= AC_ERR_HSM;
@@ -1594,7 +1594,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
tmp = atapi_eh_request_sense(qc->dev,
qc->scsicmd->sense_buffer,
- qc->result_tf.feature >> 4);
+ qc->result_tf.error >> 4);
if (!tmp)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
else
@@ -2360,7 +2360,7 @@ static void ata_eh_link_report(struct ata_link *link)
cmd->hob_feature, cmd->hob_nsect,
cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
cmd->device, qc->tag, data_buf, cdb_buf,
- res->command, res->feature, res->nsect,
+ res->status, res->error, res->nsect,
res->lbal, res->lbam, res->lbah,
res->hob_feature, res->hob_nsect,
res->hob_lbal, res->hob_lbam, res->hob_lbah,
@@ -2368,28 +2368,28 @@ static void ata_eh_link_report(struct ata_link *link)
qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
#ifdef CONFIG_ATA_VERBOSE_ERROR
- if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
- if (res->command & ATA_BUSY)
+ if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+ ATA_SENSE | ATA_ERR)) {
+ if (res->status & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
- res->command & ATA_DRDY ? "DRDY " : "",
- res->command & ATA_DF ? "DF " : "",
- res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
- res->command & ATA_ERR ? "ERR " : "");
+ res->status & ATA_DRDY ? "DRDY " : "",
+ res->status & ATA_DF ? "DF " : "",
+ res->status & ATA_DRQ ? "DRQ " : "",
+ res->status & ATA_SENSE ? "SENSE " : "",
+ res->status & ATA_ERR ? "ERR " : "");
}
if (cmd->command != ATA_CMD_PACKET &&
- (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
- ATA_IDNF | ATA_ABORTED)))
+ (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
+ ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
- res->feature & ATA_ICRC ? "ICRC " : "",
- res->feature & ATA_UNC ? "UNC " : "",
- res->feature & ATA_AMNF ? "AMNF " : "",
- res->feature & ATA_IDNF ? "IDNF " : "",
- res->feature & ATA_ABORTED ? "ABRT " : "");
+ res->error & ATA_ICRC ? "ICRC " : "",
+ res->error & ATA_UNC ? "UNC " : "",
+ res->error & ATA_AMNF ? "AMNF " : "",
+ res->error & ATA_IDNF ? "IDNF " : "",
+ res->error & ATA_ABORTED ? "ABRT " : "");
#endif
}
}
@@ -3902,11 +3902,6 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
}
}
- /* tell ACPI we're suspending */
- rc = ata_acpi_on_suspend(ap);
- if (rc)
- goto out;
-
/* suspend */
ata_eh_freeze_port(ap);
@@ -3914,7 +3909,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
ata_acpi_set_state(ap, ap->pm_mesg);
- out:
+
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 071158c0c44c..044a16daa2d4 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis);
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
- tf->command = fis[2]; /* status */
- tf->feature = fis[3]; /* error */
+ tf->status = fis[2];
+ tf->error = fis[3];
tf->lbal = fis[4];
tf->lbam = fis[5];
@@ -1406,8 +1406,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
*tag = buf[0] & 0x1f;
- tf->command = buf[2];
- tf->feature = buf[3];
+ tf->status = buf[2];
+ tf->error = buf[3];
tf->lbal = buf[4];
tf->lbam = buf[5];
tf->lbah = buf[6];
@@ -1482,7 +1482,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
if (dev->class == ATA_DEV_ZAC &&
- ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
+ ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
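The sweeping command-to-status and feature-to-error renames across libata read as though struct ata_taskfile now exposes both names for the same bytes, since those Shadow Register Block locations hold the Command/Status and Feature/Error registers depending on the transfer direction. The layout below is a hedged illustration of that aliasing, not a copy of the real definition in <linux/ata.h>.

/* Illustrative aliasing only; see include/linux/ata.h for the real struct. */
#include <linux/types.h>

struct example_taskfile {
	union {
		u8 error;	/* host <- device: Error register */
		u8 feature;	/* host -> device: Feature register */
	};
	u8 nsect;
	u8 lbal;
	u8 lbam;
	u8 lbah;
	u8 device;
	union {
		u8 status;	/* host <- device: Status register */
		u8 command;	/* host -> device: Command register */
	};
};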
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ed8be585a98f..9df1a20b77dd 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -680,7 +680,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
*/
static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
- u8 stat = tf->command, err = tf->feature;
+ u8 stat = tf->status, err = tf->error;
if (stat & ATA_BUSY) {
ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
@@ -871,8 +871,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq, verbose);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
@@ -901,13 +901,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* Copy registers into sense buffer.
*/
desc[2] = 0x00;
- desc[3] = tf->feature; /* == error reg */
+ desc[3] = tf->error;
desc[5] = tf->nsect;
desc[7] = tf->lbal;
desc[9] = tf->lbam;
desc[11] = tf->lbah;
desc[12] = tf->device;
- desc[13] = tf->command; /* == status reg */
+ desc[13] = tf->status;
/*
* Fill in Extend bit, and the high order bytes
@@ -922,8 +922,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
}
} else {
/* Fixed sense format */
- desc[0] = tf->feature;
- desc[1] = tf->command; /* status */
+ desc[0] = tf->error;
+ desc[1] = tf->status;
desc[2] = tf->device;
desc[3] = tf->nsect;
desc[7] = 0;
@@ -972,14 +972,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq, verbose);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
/* Could not decode error */
ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->command, qc->err_mask);
+ tf->status, qc->err_mask);
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -1314,21 +1314,10 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
* @plba: the LBA
* @plen: the transfer length
*/
-static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len = 0;
-
- lba |= ((u64)cdb[2]) << 24;
- lba |= ((u64)cdb[3]) << 16;
- lba |= ((u64)cdb[4]) << 8;
- lba |= ((u64)cdb[5]);
-
- len |= ((u32)cdb[7]) << 8;
- len |= ((u32)cdb[8]);
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be32(&cdb[2]);
+ *plen = get_unaligned_be16(&cdb[7]);
}
/**
@@ -1341,27 +1330,10 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
* @plba: the LBA
* @plen: the transfer length
*/
-static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len = 0;
-
- lba |= ((u64)cdb[2]) << 56;
- lba |= ((u64)cdb[3]) << 48;
- lba |= ((u64)cdb[4]) << 40;
- lba |= ((u64)cdb[5]) << 32;
- lba |= ((u64)cdb[6]) << 24;
- lba |= ((u64)cdb[7]) << 16;
- lba |= ((u64)cdb[8]) << 8;
- lba |= ((u64)cdb[9]);
-
- len |= ((u32)cdb[10]) << 24;
- len |= ((u32)cdb[11]) << 16;
- len |= ((u32)cdb[12]) << 8;
- len |= ((u32)cdb[13]);
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be64(&cdb[2]);
+ *plen = get_unaligned_be32(&cdb[10]);
}
/**
@@ -1390,19 +1362,22 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
- if (cdb[0] == VERIFY) {
+ switch (cdb[0]) {
+ case VERIFY:
if (scmd->cmd_len < 10) {
fp = 9;
goto invalid_fld;
}
scsi_10_lba_len(cdb, &block, &n_block);
- } else if (cdb[0] == VERIFY_16) {
+ break;
+ case VERIFY_16:
if (scmd->cmd_len < 16) {
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
- } else {
+ break;
+ default:
fp = 0;
goto invalid_fld;
}
@@ -1534,8 +1509,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
int rc;
u16 fp = 0;
- if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
+ switch (cdb[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
tf_flags |= ATA_TFLAG_WRITE;
+ break;
+ }
/* Calculate the SCSI LBA, transfer length and FUA. */
switch (cdb[0]) {
@@ -2493,7 +2473,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
/* fill these in, for the case where they are -not- overwritten */
cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = qc->tf.feature >> 4;
+ cmd->sense_buffer[2] = qc->tf.error >> 4;
ata_qc_reinit(qc);
@@ -2845,7 +2825,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
- if (cdb[0] == ATA_16) {
+ switch (cdb[0]) {
+ case ATA_16:
/*
* 16-byte CDB - may contain extended commands.
*
@@ -2871,7 +2852,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[12];
tf->device = cdb[13];
tf->command = cdb[14];
- } else if (cdb[0] == ATA_12) {
+ break;
+ case ATA_12:
/*
* 12-byte CDB - incapable of extended commands.
*/
@@ -2884,7 +2866,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[7];
tf->device = cdb[8];
tf->command = cdb[9];
- } else {
+ break;
+ default:
/*
* 32-byte CDB - may contain extended command fields.
*
@@ -2908,6 +2891,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->device = cdb[24];
tf->command = cdb[25];
tf->auxiliary = get_unaligned_be32(&cdb[28]);
+ break;
}
/* For NCQ commands copy the tag value */
@@ -3672,7 +3656,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
goto invalid_fld;
}
- len = (cdb[7] << 8) + cdb[8];
+ len = get_unaligned_be16(&cdb[7]);
hdr_len = 8;
}
@@ -3698,7 +3682,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (six_byte)
bd_len = p[3];
else
- bd_len = (p[6] << 8) + p[7];
+ bd_len = get_unaligned_be16(&p[6]);
len -= hdr_len;
p += hdr_len;
@@ -3722,7 +3706,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
spg = p[1];
- pg_len = (p[2] << 8) | p[3];
+ pg_len = get_unaligned_be16(&p[2]);
p += 4;
len -= 4;
} else {
@@ -3933,7 +3917,6 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case MODE_SELECT:
case MODE_SELECT_10:
return ata_scsi_mode_select_xlat;
- break;
case ZBC_IN:
return ata_scsi_zbc_in_xlat;
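The scsi_10_lba_len()/scsi_16_lba_len() rewrites and the mode-select length fixes all swap open-coded shifting for the get_unaligned_be16/32/64() helpers, which read big-endian fields at arbitrary offsets in a CDB. The stand-alone sketch below mimics that with local helpers so it compiles outside the kernel; the in-kernel helpers come from <asm/unaligned.h>.

/* Userspace illustration; be16_at()/be32_at() are local stand-ins for the
 * kernel's get_unaligned_be16()/get_unaligned_be32(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint16_t be16_at(const uint8_t *p)
{
	return ((uint16_t)p[0] << 8) | p[1];
}

int main(void)
{
	/* READ(10)-style CDB: LBA in bytes 2..5, transfer length in 7..8. */
	uint8_t cdb[10] = { 0x28, 0, 0x00, 0x00, 0x12, 0x34, 0, 0x00, 0x08, 0 };

	printf("lba=%u len=%u\n", be32_at(&cdb[2]), be16_at(&cdb[7]));
	return 0;
}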
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 75217828dfe3..b3be7a8f5bea 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -70,22 +70,35 @@ EXPORT_SYMBOL_GPL(ata_sff_check_status);
/**
* ata_sff_altstatus - Read device alternate status reg
* @ap: port where the device is
+ * @status: pointer to a status value
*
- * Reads ATA taskfile alternate status register for
- * currently-selected device and return its value.
+ * Reads the ATA alternate status register for the currently-selected
+ * device and returns its value.
*
- * Note: may NOT be used as the check_altstatus() entry in
- * ata_port_operations.
+ * RETURN:
+ * true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
-static u8 ata_sff_altstatus(struct ata_port *ap)
+static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
- if (ap->ops->sff_check_altstatus)
- return ap->ops->sff_check_altstatus(ap);
+ u8 tmp;
+
+ if (ap->ops->sff_check_altstatus) {
+ tmp = ap->ops->sff_check_altstatus(ap);
+ goto read;
+ }
+ if (ap->ioaddr.altstatus_addr) {
+ tmp = ioread8(ap->ioaddr.altstatus_addr);
+ goto read;
+ }
+ return false;
- return ioread8(ap->ioaddr.altstatus_addr);
+read:
+ if (status)
+ *status = tmp;
+ return true;
}
/**
@@ -104,12 +117,9 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
{
u8 status;
- if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
- status = ata_sff_altstatus(ap);
- /* Not us: We are busy */
- if (status & ATA_BUSY)
- return status;
- }
+ /* Not us: We are busy */
+ if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
+ return status;
/* Clear INTRQ latch */
status = ap->ops->sff_check_status(ap);
return status;
@@ -129,10 +139,7 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
static void ata_sff_sync(struct ata_port *ap)
{
- if (ap->ops->sff_check_altstatus)
- ap->ops->sff_check_altstatus(ap);
- else if (ap->ioaddr.altstatus_addr)
- ioread8(ap->ioaddr.altstatus_addr);
+ ata_sff_altstatus(ap, NULL);
}
/**
@@ -164,12 +171,12 @@ EXPORT_SYMBOL_GPL(ata_sff_pause);
void ata_sff_dma_pause(struct ata_port *ap)
{
- if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
- /* An altstatus read will cause the needed delay without
- messing up the IRQ status */
- ata_sff_altstatus(ap);
+ /*
+ * An altstatus read will cause the needed delay without
+ * messing up the IRQ status
+ */
+ if (ata_sff_altstatus(ap, NULL))
return;
- }
/* There are no DMA controllers without ctl. BUG here to ensure
we never violate the HDMA1:0 transition timing and risk
corruption. */
@@ -265,20 +272,26 @@ EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
* @ap: port where the device is
* @ctl: value to write
*
- * Writes ATA taskfile device control register.
+ * Writes ATA device control register.
*
- * Note: may NOT be used as the sff_set_devctl() entry in
- * ata_port_operations.
+ * RETURNS:
+ * true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
-static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
- if (ap->ops->sff_set_devctl)
+ if (ap->ops->sff_set_devctl) {
ap->ops->sff_set_devctl(ap, ctl);
- else
+ return true;
+ }
+ if (ap->ioaddr.ctl_addr) {
iowrite8(ctl, ap->ioaddr.ctl_addr);
+ return true;
+ }
+
+ return false;
}
/**
@@ -357,8 +370,6 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device,
*/
void ata_sff_irq_on(struct ata_port *ap)
{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
if (ap->ops->sff_irq_on) {
ap->ops->sff_irq_on(ap);
return;
@@ -367,8 +378,7 @@ void ata_sff_irq_on(struct ata_port *ap)
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
- ata_sff_set_devctl(ap, ap->ctl);
+ ata_sff_set_devctl(ap, ap->ctl);
ata_wait_idle(ap);
if (ap->ops->sff_irq_clear)
@@ -439,8 +449,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = ata_sff_check_status(ap);
- tf->feature = ioread8(ioaddr->error_addr);
+ tf->status = ata_sff_check_status(ap);
+ tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
@@ -1634,14 +1644,14 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
return;
/* See if the controller thinks it is still busy - if so the command
isn't a lost IRQ but is still in progress */
- status = ata_sff_altstatus(ap);
+ if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
+ return;
if (status & ATA_BUSY)
return;
/* There was a command running, we are no longer busy and we have
no interrupt. */
- ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
- status);
+ ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
ata_sff_port_intr(ap, qc);
@@ -1662,8 +1672,7 @@ void ata_sff_freeze(struct ata_port *ap)
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
- ata_sff_set_devctl(ap, ap->ctl);
+ ata_sff_set_devctl(ap, ap->ctl);
/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1708,16 +1717,15 @@ EXPORT_SYMBOL_GPL(ata_sff_thaw);
* Kernel thread context (may sleep)
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * Always 0.
*/
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
int rc;
- rc = ata_std_prereset(link, deadline);
- if (rc)
- return rc;
+ /* The standard prereset is best-effort and always returns 0 */
+ ata_std_prereset(link, deadline);
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
@@ -1752,10 +1760,13 @@ EXPORT_SYMBOL_GPL(ata_sff_prereset);
* correctly storing and echoing back the
* ATA shadow register contents.
*
+ * RETURNS:
+ * true if device is present, false if not.
+ *
* LOCKING:
* caller.
*/
-static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
+static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -1775,9 +1786,9 @@ static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
lbal = ioread8(ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
+ return true; /* we found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
/**
@@ -1814,7 +1825,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
memset(&tf, 0, sizeof(tf));
ap->ops->sff_tf_read(ap, &tf);
- err = tf.feature;
+ err = tf.error;
if (r_err)
*r_err = err;
@@ -1831,9 +1842,10 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
/* determine if device is ATA or ATAPI */
class = ata_port_classify(ap, &tf);
-
- if (class == ATA_DEV_UNKNOWN) {
- /* If the device failed diagnostic, it's likely to
+ switch (class) {
+ case ATA_DEV_UNKNOWN:
+ /*
+ * If the device failed diagnostic, it's likely to
* have reported incorrect device signature too.
* Assume ATA device if the device seems present but
* device signature is invalid with diagnostic
@@ -1843,10 +1855,12 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
- } else if ((class == ATA_DEV_ATA) &&
- (ap->ops->sff_check_status(ap) == 0))
- class = ATA_DEV_NONE;
-
+ break;
+ case ATA_DEV_ATA:
+ if (ap->ops->sff_check_status(ap) == 0)
+ class = ATA_DEV_NONE;
+ break;
+ }
return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
@@ -2059,10 +2073,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
return;
/* set up device control */
- if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
- ata_sff_set_devctl(ap, ap->ctl);
+ if (ata_sff_set_devctl(ap, ap->ctl))
ap->last_ctl = ap->ctl;
- }
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);
@@ -2172,18 +2184,18 @@ EXPORT_SYMBOL_GPL(ata_sff_std_ports);
#ifdef CONFIG_PCI
-static int ata_resources_present(struct pci_dev *pdev, int port)
+static bool ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
/* Check the PCI resources for this channel are enabled */
- port = port * 2;
+ port *= 2;
for (i = 0; i < 2; i++) {
if (pci_resource_start(pdev, port + i) == 0 ||
pci_resource_len(pdev, port + i) == 0)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/**
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 51e01acdd241..c9c2496d91ea 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -107,7 +107,6 @@ static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { }
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
extern void ata_acpi_dissociate(struct ata_host *host);
-extern int ata_acpi_on_suspend(struct ata_port *ap);
extern void ata_acpi_on_resume(struct ata_port *ap);
extern int ata_acpi_on_devcfg(struct ata_device *dev);
extern void ata_acpi_on_disable(struct ata_device *dev);
@@ -117,7 +116,6 @@ extern void ata_acpi_bind_dev(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
-static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
static inline void ata_acpi_on_resume(struct ata_port *ap) { }
static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
static inline void ata_acpi_on_disable(struct ata_device *dev) { }
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 24c3d5e1fca3..e89617ed9175 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -937,7 +937,8 @@ static int arasan_cf_suspend(struct device *dev)
dmaengine_terminate_all(acdev->dma_chan);
cf_exit(acdev);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int arasan_cf_resume(struct device *dev)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ad3c5808aaad..20a8f31a3f57 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -28,7 +28,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
-#define DRV_VERSION "0.4.6"
+#define DRV_VERSION "0.4.8"
/*
* The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
@@ -315,12 +315,15 @@ static struct ata_port_operations artop6260_ops = {
static void atp8xx_fixup(struct pci_dev *pdev)
{
- if (pdev->device == 0x0005)
+ u8 reg;
+
+ switch (pdev->device) {
+ case 0x0005:
/* BIOS may have left us in UDMA, clear it before libata probe */
pci_write_config_byte(pdev, 0x54, 0);
- else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
- u8 reg;
-
+ break;
+ case 0x0008:
+ case 0x0009:
/* Mac systems come up with some registers not set as we
will need them */
@@ -338,6 +341,7 @@ static void atp8xx_fixup(struct pci_dev *pdev)
/* Enable IRQ output and burst mode */
pci_read_config_byte(pdev, 0x4a, &reg);
pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+ break;
}
}
@@ -394,16 +398,19 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- if (id->driver_data == 0) /* 6210 variant */
+ switch (id->driver_data) {
+ case 0: /* 6210 variant */
ppi[0] = &info_6210;
- else if (id->driver_data == 1) /* 6260 */
+ break;
+ case 1: /* 6260 */
ppi[0] = &info_626x;
- else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
- unsigned long io = pci_resource_start(pdev, 4);
-
- ppi[0] = &info_628x;
- if (inb(io) & 0x10)
+ break;
+ case 2: /* 6280 or 6280 + fast */
+ if (inb(pci_resource_start(pdev, 4)) & 0x10)
ppi[0] = &info_628x_fast;
+ else
+ ppi[0] = &info_628x;
+ break;
}
BUG_ON(ppi[0] == NULL);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index c3a65ccd4b79..efdb94cff68b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -102,7 +102,7 @@ static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
- static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
+ static const u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
@@ -149,7 +149,7 @@ static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
+ static const u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 24ce8665b1f9..f4289a532f87 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -259,11 +259,8 @@ static int cs5520_reinit_one(struct pci_dev *pdev)
static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
pci_save_state(pdev);
return 0;
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index b78f71c70f27..6c75a22db12b 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
- tf->command = ep93xx_pata_check_status(ap);
- tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
+ tf->status = ep93xx_pata_check_status(ap);
+ tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 34cb104f6b43..2e35505b683c 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -554,10 +554,8 @@ static int pata_ftide010_remove(struct platform_device *pdev)
}
static const struct of_device_id pata_ftide010_of_match[] = {
- {
- .compatible = "faraday,ftide010",
- },
- {},
+ { .compatible = "faraday,ftide010", },
+ { /* sentinel */ }
};
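Several hunks in this series rewrite OF match tables like the one above, ending them with an explicitly commented empty entry. A stand-alone sketch of why that sentinel is required (match_id and match_lookup() are illustrative stand-ins, not the of_device_id machinery): the lookup walks the array until it reaches an all-zero record, so the table never carries an explicit length.

#include <stdio.h>
#include <string.h>

struct match_id {
	const char *compatible;		/* NULL in the sentinel entry */
};

static const struct match_id example_match[] = {
	{ .compatible = "faraday,ftide010" },
	{ /* sentinel */ }
};

/* Walk the table until the sentinel terminates it. */
static const struct match_id *match_lookup(const struct match_id *tbl,
					   const char *compat)
{
	for (; tbl->compatible; tbl++)
		if (strcmp(tbl->compatible, compat) == 0)
			return tbl;
	return NULL;
}

int main(void)
{
	printf("%s\n", match_lookup(example_match, "faraday,ftide010")
	       ? "matched" : "no match");
	return 0;
}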
static struct platform_driver pata_ftide010_driver = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 778c893f276b..c99e8f0708b3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.6.11"
+#define DRV_VERSION "0.6.13"
struct hpt_clock {
u8 xfer_mode;
@@ -278,6 +278,40 @@ static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
hpt366_set_mode(ap, adev, adev->dma_mode);
}
+/**
+ * hpt366_prereset - reset the hpt36x bus
+ * @link: ATA link to reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the initial reset handling for the 36x series controllers.
+ * Reset the hardware and state machine,
+ */
+
+static int hpt366_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ /*
+ * HPT36x chips have one channel per function and have
+ * both channel enable bits located differently and visible
+ * to both functions -- really stupid design decision... :-(
+ * Bit 4 is for the primary channel, bit 5 for the secondary.
+ */
+ static const struct pci_bits hpt366_enable_bits = {
+ 0x50, 1, 0x30, 0x30
+ };
+ u8 mcr2;
+
+ if (!pci_test_config_bits(pdev, &hpt366_enable_bits))
+ return -ENOENT;
+
+ pci_read_config_byte(pdev, 0x51, &mcr2);
+ if (mcr2 & 0x80)
+ pci_write_config_byte(pdev, 0x51, mcr2 & ~0x80);
+
+ return ata_sff_prereset(link, deadline);
+}
+
static struct scsi_host_template hpt36x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -288,6 +322,7 @@ static struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
+ .prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
@@ -304,16 +339,20 @@ static struct ata_port_operations hpt366_port_ops = {
static void hpt36x_init_chipset(struct pci_dev *dev)
{
- u8 drive_fast;
+ u8 mcr1;
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
- pci_read_config_byte(dev, 0x51, &drive_fast);
- if (drive_fast & 0x80)
- pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+ /*
+ * Now we'll have to force both channels enabled if at least one
+ * of them has been enabled by BIOS...
+ */
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ if (mcr1 & 0x30)
+ pci_write_config_byte(dev, 0x50, mcr1 | 0x30);
}
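Read together with the new hpt366_prereset() above, the MCR1 handling amounts to: if the BIOS left either channel enabled, force both enable bits on, so that the enable-bits test performed at prereset time sees a consistent value for both functions. A tiny stand-alone sketch of just that bit manipulation, without the PCI accessors:

#include <assert.h>
#include <stdint.h>

/* Bit 4: primary channel enable, bit 5: secondary channel enable. */
static uint8_t hpt36x_fixup_mcr1(uint8_t mcr1)
{
	if (mcr1 & 0x30)	/* at least one channel enabled by the BIOS */
		mcr1 |= 0x30;	/* ...then force both bits on */
	return mcr1;
}

int main(void)
{
	assert(hpt36x_fixup_mcr1(0x10) == 0x30);	/* primary only: enable both */
	assert(hpt36x_fixup_mcr1(0x00) == 0x00);	/* nothing enabled stays off */
	return 0;
}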
/**
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 7abc7e04f656..156f304ef051 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.23"
+#define DRV_VERSION "0.6.25"
struct hpt_clock {
u8 xfer_speed;
@@ -394,6 +394,7 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
{ 0x50, 1, 0x04, 0x04 },
{ 0x54, 1, 0x04, 0x04 }
};
+ u8 mcr2;
if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
return -ENOENT;
@@ -402,25 +403,29 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
+ /*
+ * Disable the "fast interrupt" prediction. Don't hold off
+ * on interrupts. (== 0x01 despite what the docs say)
+ */
+ pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
+ /* Is it HPT370/A? */
+ if (pdev->device == PCI_DEVICE_ID_TTI_HPT366 && pdev->revision < 5) {
+ mcr2 &= ~0x02;
+ mcr2 |= 0x01;
+ } else {
+ mcr2 &= ~0x07;
+ }
+ pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
+
return ata_sff_prereset(link, deadline);
}
-static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
+static void hpt37x_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
+ int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x02;
- fast |= 0x01;
- pci_write_config_byte(pdev, addr2, fast);
/* Determine timing mask and find matching mode entry */
if (mode < XFER_MW_DMA_0)
@@ -432,34 +437,34 @@ static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
timing = hpt37x_find_mode(ap, mode);
- pci_read_config_dword(pdev, addr1, &reg);
+ pci_read_config_dword(pdev, addr, &reg);
reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
+ pci_write_config_dword(pdev, addr, reg);
}
/**
- * hpt370_set_piomode - PIO setup
+ * hpt37x_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
-static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void hpt37x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- hpt370_set_mode(ap, adev, adev->pio_mode);
+ hpt37x_set_mode(ap, adev, adev->pio_mode);
}
/**
- * hpt370_set_dmamode - DMA timing setup
+ * hpt37x_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes.
*/
-static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+static void hpt37x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- hpt370_set_mode(ap, adev, adev->dma_mode);
+ hpt37x_set_mode(ap, adev, adev->dma_mode);
}
/**
@@ -499,63 +504,6 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
ata_bmdma_stop(qc);
}
-static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev,
- u8 mode)
-{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
- u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
-
- /* Determine timing mask and find matching mode entry */
- if (mode < XFER_MW_DMA_0)
- mask = 0xcfc3ffff;
- else if (mode < XFER_UDMA_0)
- mask = 0x31c001ff;
- else
- mask = 0x303c0000;
-
- timing = hpt37x_find_mode(ap, mode);
-
- pci_read_config_dword(pdev, addr1, &reg);
- reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
-}
-
-/**
- * hpt372_set_piomode - PIO setup
- * @ap: ATA interface
- * @adev: device on the interface
- *
- * Perform PIO mode setup.
- */
-
-static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- hpt372_set_mode(ap, adev, adev->pio_mode);
-}
-
-/**
- * hpt372_set_dmamode - DMA timing setup
- * @ap: ATA interface
- * @adev: Device being configured
- *
- * Set up the channel for MWDMA or UDMA modes.
- */
-
-static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
-{
- hpt372_set_mode(ap, adev, adev->dma_mode);
-}
-
/**
* hpt37x_bmdma_stop - DMA engine stop
* @qc: ATA command
@@ -593,8 +541,8 @@ static struct ata_port_operations hpt370_port_ops = {
.mode_filter = hpt370_filter,
.cable_detect = hpt37x_cable_detect,
- .set_piomode = hpt370_set_piomode,
- .set_dmamode = hpt370_set_dmamode,
+ .set_piomode = hpt37x_set_piomode,
+ .set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
@@ -608,8 +556,7 @@ static struct ata_port_operations hpt370a_port_ops = {
};
/*
- * Configuration for HPT371 and HPT302. Slightly different PIO and DMA
- * mode setting functionality.
+ * Configuration for HPT371 and HPT302.
*/
static struct ata_port_operations hpt302_port_ops = {
@@ -618,8 +565,8 @@ static struct ata_port_operations hpt302_port_ops = {
.bmdma_stop = hpt37x_bmdma_stop,
.cable_detect = hpt37x_cable_detect,
- .set_piomode = hpt372_set_piomode,
- .set_dmamode = hpt372_set_dmamode,
+ .set_piomode = hpt37x_set_piomode,
+ .set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
@@ -920,6 +867,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_write_config_byte(dev, 0x5a, irqmask);
/*
+ * HPT371 chips physically have only one channel, the secondary one,
+ * but the primary channel registers do exist! Go figure...
+ * So, we manually disable the non-existing channel here
+ * (if the BIOS hasn't done this already).
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+ u8 mcr1;
+
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ }
+
+ /*
* default to pci clock. make sure MA15/16 are set to output
* to prevent drives having problems with 40-pin cables. Needed
* for some drives such as IBM-DTLA which will not enter ready
@@ -950,14 +911,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if ((freq >> 12) != 0xABCDE) {
int i;
- u8 sr;
+ u16 sr;
u32 total = 0;
dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
- pci_read_config_byte(dev, 0x78, &sr);
+ pci_read_config_word(dev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
}
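The merged hpt37x_set_mode() above computes the per-device timing register address inline; the chip keeps one 32-bit timing word per (port, device) pair starting at config offset 0x40. A stand-alone sketch of that arithmetic and the offsets it yields:

#include <assert.h>

static int hpt37x_timing_reg(int port_no, int devno)
{
	/* one 32-bit timing word per device, base offset 0x40 */
	return 0x40 + 4 * (devno + 2 * port_no);
}

int main(void)
{
	assert(hpt37x_timing_reg(0, 0) == 0x40);
	assert(hpt37x_timing_reg(0, 1) == 0x44);
	assert(hpt37x_timing_reg(1, 0) == 0x48);
	assert(hpt37x_timing_reg(1, 1) == 0x4c);
	return 0;
}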
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 1d9d4eec5b8a..1f6afd8ee29b 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -24,10 +24,9 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.15"
+#define DRV_VERSION "0.3.18"
enum {
- HPT_PCI_FAST = (1 << 31),
PCI66 = (1 << 1),
USE_DPLL = (1 << 0)
};
@@ -37,11 +36,6 @@ struct hpt_clock {
u32 timing;
};
-struct hpt_chip {
- const char *name;
- struct hpt_clock *clocks[3];
-};
-
/* key for bus clock timings
* bit
* 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
@@ -168,11 +162,24 @@ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits hpt3x2n_enable_bits[] = {
+ { 0x50, 1, 0x04, 0x04 },
+ { 0x54, 1, 0x04, 0x04 }
+ };
+ u8 mcr2;
+
+ if (!pci_test_config_bits(pdev, &hpt3x2n_enable_bits[ap->port_no]))
+ return -ENOENT;
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
+ mcr2 &= ~0x07;
+ pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
+
return ata_sff_prereset(link, deadline);
}
@@ -180,17 +187,8 @@ static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
+ int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
/* Determine timing mask and find matching mode entry */
if (mode < XFER_MW_DMA_0)
@@ -202,9 +200,9 @@ static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
timing = hpt3x2n_find_mode(ap, mode);
- pci_read_config_dword(pdev, addr1, &reg);
+ pci_read_config_dword(pdev, addr, &reg);
reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
+ pci_write_config_dword(pdev, addr, reg);
}
/**
@@ -244,7 +242,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int mscreg = 0x50 + 2 * ap->port_no;
+ int mscreg = 0x50 + 4 * ap->port_no;
u8 bwsr_stat, msc_stat;
pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
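The last hunk above corrects the per-port MCR offset in hpt3x2n_bmdma_stop(): as the new enable-bits table and the state-machine reset in hpt3x2n_pre_reset() both assume, the channel registers sit at 0x50 and 0x54, a stride of 4, so the old stride of 2 pointed the secondary channel at the wrong register. A stand-alone sketch of the corrected arithmetic:

#include <assert.h>

static int hpt3x2n_mcr_reg(int port_no)
{
	return 0x50 + 4 * port_no;	/* previously 0x50 + 2 * port_no */
}

int main(void)
{
	assert(hpt3x2n_mcr_reg(0) == 0x50);	/* primary channel MCR */
	assert(hpt3x2n_mcr_reg(1) == 0x54);	/* secondary channel MCR */
	return 0;
}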
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 2e538726802b..150939275b1b 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -223,17 +223,14 @@ static int pata_imx_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- int ret;
- ret = ata_host_suspend(host, PMSG_SUSPEND);
- if (!ret) {
- __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
- priv->ata_ctl =
- __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
- clk_disable_unprepare(priv->clk);
- }
+ ata_host_suspend(host, PMSG_SUSPEND);
- return ret;
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+ priv->ata_ctl = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
}
static int pata_imx_resume(struct device *dev)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 17b557c91e1c..e225913a619d 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -293,7 +293,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
- { },
+ { /* sentinel */ }
};
static struct platform_driver ixp4xx_pata_platform_driver = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 16e8aa184a75..42798402cf63 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -853,12 +853,8 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
#ifdef CONFIG_PM_SLEEP
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
- int rc;
-
/* First, core libata suspend to do most of the work */
- rc = ata_host_suspend(priv->host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(priv->host, mesg);
/* Restore to default timings */
pata_macio_default_timings(priv);
@@ -1333,19 +1329,11 @@ static int pata_macio_pci_resume(struct pci_dev *pdev)
static const struct of_device_id pata_macio_match[] =
{
- {
- .name = "IDE",
- },
- {
- .name = "ATA",
- },
- {
- .type = "ide",
- },
- {
- .type = "ata",
- },
- {},
+ { .name = "IDE", },
+ { .name = "ATA", },
+ { .type = "ide", },
+ { .type = "ata", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_macio_match);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index f1d352d5f128..3250ef317df6 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -736,7 +736,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
}
/* Prepare our private structure */
- priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC);
+ priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
rv = -ENOMEM;
goto err1;
@@ -824,7 +824,8 @@ mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
- return ata_host_suspend(host, state);
+ ata_host_suspend(host, state);
+ return 0;
}
static int
@@ -849,7 +850,7 @@ mpc52xx_ata_resume(struct platform_device *op)
static const struct of_device_id mpc52xx_ata_of_match[] = {
{ .compatible = "fsl,mpc5200-ata", },
{ .compatible = "mpc5200-ata", },
- {},
+ { /* sentinel */ }
};
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index f4949e704356..9dd6bffefb48 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -264,8 +264,8 @@ void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = ns87560_check_status(ap);
- tf->feature = ioread8(ioaddr->error_addr);
+ tf->status = ns87560_check_status(ap);
+ tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 05c2ab375756..6b5ed3046b44 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -382,7 +382,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
void __iomem *base = ap->ioaddr.data_addr;
blob = __raw_readw(base + 0xc);
- tf->feature = blob >> 8;
+ tf->error = blob >> 8;
blob = __raw_readw(base + 2);
tf->nsect = blob & 0xff;
@@ -394,7 +394,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
blob = __raw_readw(base + 6);
tf->device = blob & 0xff;
- tf->command = blob >> 8;
+ tf->status = blob >> 8;
if (tf->flags & ATA_TFLAG_LBA48) {
if (likely(ap->ioaddr.ctl_addr)) {
@@ -1006,10 +1006,8 @@ static void octeon_cf_shutdown(struct device *dev)
}
static const struct of_device_id octeon_cf_match[] = {
- {
- .compatible = "cavium,ebt3000-compact-flash",
- },
- {},
+ { .compatible = "cavium,ebt3000-compact-flash", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_cf_match);
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index c3a40b717dcd..ac5a633c00a5 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -79,7 +79,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
static const struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index b99849095853..f894ff2de0a9 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -78,7 +78,7 @@ static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *a
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
- static u16 pio_timing[5] = {
+ static const u16 pio_timing[5] = {
0x0913, 0x050C , 0x0308, 0x0206, 0x0104
};
u8 r_ap, r_bp;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 028329428b75..21fb059859bd 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -128,6 +128,8 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res,
ap = host->ports[0];
ap->ops = devm_kzalloc(dev, sizeof(*ap->ops), GFP_KERNEL);
+ if (!ap->ops)
+ return -ENOMEM;
ap->ops->inherits = &ata_sff_port_ops;
ap->ops->cable_detect = ata_cable_unknown;
ap->ops->set_mode = pata_platform_set_mode;
@@ -198,22 +200,16 @@ static int pata_platform_probe(struct platform_device *pdev)
/*
* Get the I/O base first
*/
- io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (io_res == NULL) {
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(io_res == NULL))
- return -EINVAL;
- }
+ io_res = platform_get_mem_or_io(pdev, 0);
+ if (!io_res)
+ return -EINVAL;
/*
* Then the CTL base
*/
- ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
- if (ctl_res == NULL) {
- ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (unlikely(ctl_res == NULL))
- return -EINVAL;
- }
+ ctl_res = platform_get_mem_or_io(pdev, 1);
+ if (!ctl_res)
+ return -EINVAL;
/*
* And the IRQ
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 41430f79663c..985f42c4fd70 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -164,10 +164,10 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *cmd_res;
struct resource *ctl_res;
struct resource *dma_res;
- struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
int ret = 0;
+ int irq;
/*
* Resource validation, three resources are needed:
@@ -205,9 +205,9 @@ static int pxa_ata_probe(struct platform_device *pdev)
/*
* IRQ pin
*/
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(irq_res == NULL))
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/*
* Allocate the host
@@ -287,7 +287,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
/*
* Activate the ATA host
*/
- ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
+ ret = ata_host_activate(host, irq, ata_sff_interrupt,
pdata->irq_flags, &pxa_ata_sht);
if (ret)
dma_release_channel(data->dma_chan);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 3da0e8e30286..aba1536ddd44 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->feature = ata_inb(ap->host, ioaddr->error_addr);
+ tf->error = ata_inb(ap->host, ioaddr->error_addr);
tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
@@ -308,8 +308,7 @@ static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
/*
* pata_s3c_devchk - PATA device presence detection
*/
-static unsigned int pata_s3c_devchk(struct ata_port *ap,
- unsigned int device)
+static bool pata_s3c_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -329,9 +328,9 @@ static unsigned int pata_s3c_devchk(struct ata_port *ap,
lbal = ata_inb(ap->host, ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
+ return true; /* we found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
/*
@@ -608,7 +607,8 @@ static int pata_s3c_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int pata_s3c_resume(struct device *dev)
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 8a033598e7e1..782162d2f3f8 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -198,11 +198,8 @@ static const struct pci_device_id triflex[] = {
static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
/*
* We must not disable or powerdown the device.
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index da0152116d9f..b9a4f68b371d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d %d\n",
+ return sysfs_emit(buf, "%u %u\n",
intr_coalescing_count, intr_coalescing_ticks);
}
@@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
{
unsigned int coalescing_count, coalescing_ticks;
- if (sscanf(buf, "%d%d",
- &coalescing_count,
- &coalescing_ticks) != 2) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
@@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
rx_watermark &= 0x1f;
spin_unlock_irqrestore(&host->lock, flags);
- return sysfs_emit(buf, "%d\n", rx_watermark);
+ return sysfs_emit(buf, "%u\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
@@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
void __iomem *csr_base = host_priv->csr_base;
u32 temp;
- if (sscanf(buf, "%d", &rx_watermark) != 1) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (kstrtouint(buf, 10, &rx_watermark) < 0) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
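The store hook above moves from sscanf() to kstrtouint(), which fails on anything that is not a single complete unsigned number. A rough user-space analogue of that stricter parse, with parse_uint() as a stand-in rather than the kernel helper:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Reject missing digits, trailing garbage (a final newline is allowed)
 * and values that do not fit in an unsigned int. */
static int parse_uint(const char *s, unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 10);
	if (end == s || (*end != '\0' && !(*end == '\n' && end[1] == '\0')))
		return -EINVAL;
	if (errno == ERANGE || val > UINT_MAX)
		return -ERANGE;
	*out = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("%d\n", parse_uint("16\n", &v));		/* 0, v == 16 */
	printf("%d\n", parse_uint("16 32", &v));	/* -EINVAL: not one number */
	return 0;
}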
@@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
temp = ioread32(csr_base + TRANSCFG);
temp &= 0xffffffe0;
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
-
spin_unlock_irqrestore(&host->lock, flags);
+
return strlen(buf);
}
@@ -1546,7 +1544,9 @@ static int sata_fsl_remove(struct platform_device *ofdev)
static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
- return ata_host_suspend(host, state);
+
+ ata_host_suspend(host, state);
+ return 0;
}
static int sata_fsl_resume(struct platform_device *op)
@@ -1579,13 +1579,9 @@ static int sata_fsl_resume(struct platform_device *op)
#endif
static const struct of_device_id fsl_sata_match[] = {
- {
- .compatible = "fsl,pq-sata",
- },
- {
- .compatible = "fsl,pq-sata-v2",
- },
- {},
+ { .compatible = "fsl,pq-sata", },
+ { .compatible = "fsl,pq-sata-v2", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_sata_match);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 440a63de20d0..00e1c7941d0e 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -419,10 +419,8 @@ static int gemini_sata_remove(struct platform_device *pdev)
}
static const struct of_device_id gemini_sata_of_match[] = {
- {
- .compatible = "cortina,gemini-sata-bridge",
- },
- {},
+ { .compatible = "cortina,gemini-sata-bridge", },
+ { /* sentinel */ }
};
static struct platform_driver gemini_sata_driver = {
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index b29d3f1d64b0..dfbf9493e451 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
@@ -444,7 +444,7 @@ static struct scsi_host_template ahci_highbank_platform_sht = {
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci" },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -587,7 +587,8 @@ static int ahci_highbank_suspend(struct device *dev)
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int ahci_highbank_resume(struct device *dev)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 781901151d82..11e518f0111c 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -557,13 +557,13 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
void __iomem *port_base = inic_port_base(ap);
- tf->feature = readb(port_base + PORT_TF_FEATURE);
+ tf->error = readb(port_base + PORT_TF_FEATURE);
tf->nsect = readb(port_base + PORT_TF_NSECT);
tf->lbal = readb(port_base + PORT_TF_LBAL);
tf->lbam = readb(port_base + PORT_TF_LBAM);
tf->lbah = readb(port_base + PORT_TF_LBAH);
tf->device = readb(port_base + PORT_TF_DEVICE);
- tf->command = readb(port_base + PORT_TF_COMMAND);
+ tf->status = readb(port_base + PORT_TF_COMMAND);
}
static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -580,11 +580,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
inic_tf_read(qc->ap, &tf);
- if (!(tf.command & ATA_ERR))
+ if (!(tf.status & ATA_ERR))
return false;
- rtf->command = tf.command;
- rtf->feature = tf.feature;
+ rtf->status = tf.status;
+ rtf->error = tf.error;
return true;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 53446b997740..de5bd02cad44 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4235,10 +4235,10 @@ static int mv_platform_remove(struct platform_device *pdev)
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
+
if (host)
- return ata_host_suspend(host, state);
- else
- return 0;
+ ata_host_suspend(host, state);
+ return 0;
}
static int mv_platform_resume(struct platform_device *pdev)
@@ -4277,7 +4277,7 @@ static int mv_platform_resume(struct platform_device *pdev)
static const struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 3d96b6faa3f0..590ebea99601 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -18,10 +18,6 @@
#define DRV_NAME "sata_rcar"
-/* SH-Navi2G/ATAPI-ATA compatible task registers */
-#define DATA_REG 0x100
-#define SDEVCON_REG 0x138
-
/* SH-Navi2G/ATAPI module compatible control registers */
#define ATAPI_CONTROL1_REG 0x180
#define ATAPI_STATUS_REG 0x184
@@ -283,8 +279,7 @@ static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
ata_sff_pause(ap); /* needed; also flushes, for mmio */
}
-static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
- unsigned int device)
+static bool sata_rcar_ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -304,9 +299,9 @@ static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
lbal = ioread32(ioaddr->lbal_addr);
if (nsect == 0x55 && lbal == 0xaa)
- return 1; /* found a device */
+ return true; /* found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
static int sata_rcar_wait_after_reset(struct ata_link *link,
@@ -399,8 +394,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = sata_rcar_check_status(ap);
- tf->feature = ioread32(ioaddr->error_addr);
+ tf->status = sata_rcar_check_status(ap);
+ tf->error = ioread32(ioaddr->error_addr);
tf->nsect = ioread32(ioaddr->nsect_addr);
tf->lbal = ioread32(ioaddr->lbal_addr);
tf->lbam = ioread32(ioaddr->lbam_addr);
@@ -857,7 +852,7 @@ static const struct of_device_id sata_rcar_match[] = {
.compatible = "renesas,rcar-gen3-sata",
.data = (void *)RCAR_GEN3_SATA
},
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -945,19 +940,17 @@ static int sata_rcar_suspend(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
- int ret;
- ret = ata_host_suspend(host, PMSG_SUSPEND);
- if (!ret) {
- /* disable interrupts */
- iowrite32(0, base + ATAPI_INT_ENABLE_REG);
- /* mask */
- iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+ ata_host_suspend(host, PMSG_SUSPEND);
- pm_runtime_put(dev);
- }
+ /* disable interrupts */
+ iowrite32(0, base + ATAPI_INT_ENABLE_REG);
+ /* mask */
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
- return ret;
+ pm_runtime_put(dev);
+
+ return 0;
}
static int sata_rcar_resume(struct device *dev)
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index f8552559db7f..2e3418a82b44 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u16 nsect, lbal, lbam, lbah, feature;
+ u16 nsect, lbal, lbam, lbah, error;
- tf->command = k2_stat_check_status(ap);
+ tf->status = k2_stat_check_status(ap);
tf->device = readw(ioaddr->device_addr);
- feature = readw(ioaddr->error_addr);
+ error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
- tf->feature = feature;
+ tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
- tf->hob_feature = feature >> 8;
+ tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8fa952cb9f7f..87e4ed66b306 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u16 nsect, lbal, lbam, lbah, feature;
+ u16 nsect, lbal, lbam, lbah, error;
- tf->command = ata_sff_check_status(ap);
+ tf->status = ata_sff_check_status(ap);
tf->device = readw(ioaddr->device_addr);
- feature = readw(ioaddr->error_addr);
+ error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
- tf->feature = feature;
+ tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
- tf->hob_feature = feature >> 8;
+ tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 422753d52244..a31ffe16e626 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
skb_data3 = skb->data[3];
paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
+ return enq_next;
ENI_PRV_PADDR(skb) = paddr;
/* prepare DMA queue entries */
j = 0;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 3bc3c314a467..4f67404fe64c 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev)
dev->hw_base = pci_resource_start(pci_dev, 0);
dev->base = ioremap(dev->hw_base, 0x1000);
+ if (!dev->base)
+ return 1;
reset_chip (dev);
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 38ba08628ccb..2578b2d45439 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
if (buf[1] > 7)
return 1;
- i = 0;
+ i = 2;
shift = 0;
value = 0;
while (*esc && i < LCD2S_CHARACTER_SIZE + 2) {
@@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
+ lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
+ if (!lcd2s)
+ return -ENOMEM;
+
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
@@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
if (!lcd)
return -ENOMEM;
- lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL);
- if (!lcd2s) {
- err = -ENOMEM;
- goto fail1;
- }
-
lcd->drvdata = lcd2s;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
err = device_property_read_u32(&i2c->dev, "display-height-chars",
&lcd->height);
if (err)
- goto fail2;
+ goto fail1;
err = device_property_read_u32(&i2c->dev, "display-width-chars",
&lcd->width);
if (err)
- goto fail2;
+ goto fail1;
lcd->ops = &lcd2s_ops;
err = charlcd_register(lcd2s->charlcd);
if (err)
- goto fail2;
+ goto fail1;
i2c_set_clientdata(i2c, lcd2s);
return 0;
-fail2:
- kfree(lcd2s);
fail1:
- kfree(lcd);
+ charlcd_free(lcd2s->charlcd);
return err;
}
@@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c)
struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
charlcd_unregister(lcd2s->charlcd);
- kfree(lcd2s->charlcd);
+ charlcd_free(lcd2s->charlcd);
return 0;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 976154140f0b..1d6636ebaac5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -339,6 +339,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+void topology_init_cpu_capacity_cppc(void)
+{
+ struct cppc_perf_caps perf_caps;
+ int cpu;
+
+ if (likely(acpi_disabled || !acpi_cpc_valid()))
+ return;
+
+ raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+ (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+ (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+ raw_capacity[cpu] = perf_caps.highest_perf;
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+ cpu, raw_capacity[cpu]);
+ continue;
+ }
+
+ pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ goto exit;
+ }
+
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+ free_raw_capacity();
+}
+#endif
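topology_init_cpu_capacity_cppc() above is deliberately all-or-nothing: a single CPU with missing or inconsistent CPPC performance caps makes the whole pass fall back to the default capacity. A stand-alone sketch of that policy, with plain arrays standing in for the per-CPU cppc_perf_caps values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool fill_raw_capacity(const unsigned int *highest,
			      const unsigned int *nominal,
			      const unsigned int *lowest,
			      unsigned int *raw, size_t ncpus)
{
	for (size_t cpu = 0; cpu < ncpus; cpu++) {
		/* same sanity check as above: highest must dominate */
		if (highest[cpu] < nominal[cpu] || highest[cpu] < lowest[cpu])
			return false;	/* partial info: keep defaults everywhere */
		raw[cpu] = highest[cpu];
	}
	return true;
}

int main(void)
{
	unsigned int highest[2] = { 300, 180 };
	unsigned int nominal[2] = { 280, 160 };
	unsigned int lowest[2]  = { 100,  60 };
	unsigned int raw[2];

	printf("%s\n", fill_raw_capacity(highest, nominal, lowest, raw, 2)
	       ? "using CPPC capacities" : "falling back to defaults");
	return 0;
}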
+
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
@@ -387,9 +427,8 @@ static int __init register_cpufreq_notifier(void)
int ret;
/*
- * on ACPI-based systems we need to use the default cpu capacity
- * until we have the necessary code to parse the cpu capacity, so
- * skip registering cpufreq notifier.
+ * On ACPI-based systems, skip registering the cpufreq notifier, as cpufreq
+ * information is not needed for cpu capacity initialization.
*/
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7476f393df97..8feb85e186e3 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -16,7 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bb957b11861..3d6430eb0c6a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -21,7 +21,7 @@
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5fc258073bc7..2ef23fce0860 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -275,7 +275,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
return -ENOMEM;
cpumask_andnot(isolated, cpu_possible_mask,
- housekeeping_cpumask(HK_FLAG_DOMAIN));
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9eaaff2f556c..f47cab21430f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -629,6 +629,9 @@ re_probe:
drv->remove(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
devres_release_all(dev);
arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index f41063ac1aee..db5a03a0618e 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -17,7 +17,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
diff --git a/drivers/base/init.c b/drivers/base/init.c
index a9f57c22fb9e..d8d0fe687111 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -35,5 +35,6 @@ void __init driver_init(void)
auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
+ node_dev_init();
container_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 365cd4a7f239..7222ff9b5e05 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -215,6 +215,7 @@ static int memory_block_online(struct memory_block *mem)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
+ mem->zone = zone;
return ret;
}
@@ -225,6 +226,9 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
int ret;
+ if (!mem->zone)
+ return -EINVAL;
+
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
@@ -234,7 +238,7 @@ static int memory_block_offline(struct memory_block *mem)
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, mem->group);
+ nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
@@ -246,6 +250,7 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ mem->zone = NULL;
return ret;
}
@@ -411,11 +416,10 @@ static ssize_t valid_zones_show(struct device *dev,
*/
if (mem->state == MEM_ONLINE) {
/*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
*/
- default_zone = test_pages_in_a_zone(start_pfn,
- start_pfn + nr_pages);
+ default_zone = mem->zone;
if (!default_zone)
return sysfs_emit(buf, "%s\n", "none");
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
@@ -555,6 +559,8 @@ static ssize_t hard_offline_page_store(struct device *dev,
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, 0);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret ? ret : count;
}
@@ -613,11 +619,7 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
NULL,
};
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
+static int __add_memory_block(struct memory_block *memory)
{
int ret;
@@ -641,9 +643,85 @@ int register_memory(struct memory_block *memory)
return ret;
}
-static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages,
- struct memory_group *group)
+static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
+ int nid)
+{
+ const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct zone *zone, *matching_zone = NULL;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int i;
+
+ /*
+ * This logic only works for early memory, when the applicable zones
+ * already span the memory block. We don't expect overlapping zones on
+ * a single node for early memory. So if we're told that some PFNs
+ * of a node fall into this memory block, we can assume that all node
+ * zones that intersect with the memory block are actually applicable.
+ * No need to look at the memmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+ if (!zone_intersects(zone, start_pfn, nr_pages))
+ continue;
+ if (!matching_zone) {
+ matching_zone = zone;
+ continue;
+ }
+ /* Spans multiple zones ... */
+ matching_zone = NULL;
+ break;
+ }
+ return matching_zone;
+}
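early_node_zone_for_memory_block() above only returns a zone when the block intersects exactly one populated zone; a second match makes it give up. A stand-alone sketch of that "unique overlap or nothing" selection over plain PFN-style ranges (struct range and unique_overlap() are stand-ins for the zone machinery):

#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

static int ranges_intersect(const struct range *r,
			    unsigned long start, unsigned long nr)
{
	return r->start < start + nr && start < r->end;
}

/* Return the single range intersecting [start, start + nr), or NULL if
 * none or more than one does, mirroring the "spans multiple zones"
 * bail-out above. */
static const struct range *unique_overlap(const struct range *ranges, int n,
					  unsigned long start, unsigned long nr)
{
	const struct range *match = NULL;

	for (int i = 0; i < n; i++) {
		if (!ranges_intersect(&ranges[i], start, nr))
			continue;
		if (match)
			return NULL;	/* second match: ambiguous */
		match = &ranges[i];
	}
	return match;
}

int main(void)
{
	const struct range zones[2] = { { 0, 4096 }, { 4096, 32768 } };

	/* falls entirely inside the second range: unique match */
	printf("%d\n", unique_overlap(zones, 2, 8192, 2048) != NULL);
	/* straddles both ranges: no usable answer */
	printf("%d\n", unique_overlap(zones, 2, 2048, 4096) != NULL);
	return 0;
}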
+
+#ifdef CONFIG_NUMA
+/**
+ * memory_block_add_nid() - Indicate that system RAM falling into this memory
+ * block device (partially) belongs to the given node.
+ * @mem: The memory block device.
+ * @nid: The node id.
+ * @context: The memory initialization context.
+ *
+ * Indicate that system RAM falling into this memory block (partially) belongs
+ * to the given node. If the context indicates ("early") that we are adding the
+ * node during node device subsystem initialization, this will also properly
+ * set/adjust mem->zone based on the zone ranges of the given node.
+ */
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context)
+{
+ if (context == MEMINIT_EARLY && mem->nid != nid) {
+ /*
+ * For early memory we have to determine the zone when setting
+ * the node id and handle multiple nodes spanning a single
+ * memory block by indicating via zone == NULL that we're not
+ * dealing with a single zone. So if we're setting the node id
+ * the first time, determine if there is a single zone. If we're
+ * setting the node id a second time to a different node,
+ * invalidate the single detected zone.
+ */
+ if (mem->nid == NUMA_NO_NODE)
+ mem->zone = early_node_zone_for_memory_block(mem, nid);
+ else
+ mem->zone = NULL;
+ }
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
+}
+#endif
+
+static int add_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -663,17 +741,30 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->nr_vmemmap_pages = nr_vmemmap_pages;
INIT_LIST_HEAD(&mem->group_next);
+#ifndef CONFIG_NUMA
+ if (state == MEM_ONLINE)
+ /*
+ * MEM_ONLINE at this point implies early memory. With NUMA,
+ * we'll determine the zone when setting the node id via
+ * memory_block_add_nid(). Memory hotplug updated the zone
+ * manually when memory onlining/offlining succeeds.
+ */
+ mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
+#endif /* CONFIG_NUMA */
+
+ ret = __add_memory_block(mem);
+ if (ret)
+ return ret;
+
if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
- ret = register_memory(mem);
-
- return ret;
+ return 0;
}
-static int add_memory_block(unsigned long base_section_nr)
+static int __init add_boot_memory_block(unsigned long base_section_nr)
{
int section_count = 0;
unsigned long nr;
@@ -685,11 +776,18 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0, NULL);
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, 0, NULL);
+}
+
+static int add_hotplug_memory_block(unsigned long block_id,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
}
-static void unregister_memory(struct memory_block *memory)
+static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
@@ -728,8 +826,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
- group);
+ ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
if (ret)
break;
}
@@ -740,7 +837,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
return ret;
@@ -769,7 +866,7 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
if (WARN_ON_ONCE(!mem))
continue;
unregister_memory_block_under_nodes(mem);
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
@@ -829,7 +926,7 @@ void __init memory_dev_init(void)
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- ret = add_memory_block(nr);
+ ret = add_boot_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 87acc47e8951..ec8bb24a5a22 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -796,15 +796,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk)
+ struct memory_block *mem_blk,
+ enum meminit_context context)
{
int ret;
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
- mem_blk->nid = nid;
+ memory_block_add_nid(mem_blk, nid, context);
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
@@ -857,7 +854,7 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
return 0;
}
/* mem section does not span the specified node */
@@ -873,7 +870,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
return 0;
}
@@ -892,8 +889,9 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
- enum meminit_context context)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
walk_memory_blocks_func_t func;
@@ -1065,26 +1063,30 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
};
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
-static int __init register_node_type(void)
+void __init node_dev_init(void)
{
- int ret;
+ static struct notifier_block node_memory_callback_nb = {
+ .notifier_call = node_memory_callback,
+ .priority = NODE_CALLBACK_PRI,
+ };
+ int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
- if (!ret) {
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
- register_hotmemory_notifier(&node_memory_callback_nb);
- }
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
+
+ register_hotmemory_notifier(&node_memory_callback_nb);
/*
- * Note: we're not going to unregister the node class if we fail
- * to register the node state class attribute files.
+ * Create all node devices, which will properly link the node
+ * to applicable memory block devices and already created cpu devices.
*/
- return ret;
+ for_each_online_node(i) {
+ ret = register_one_node(i);
+ if (ret)
+ panic("%s() failed to add node: %d\n", __func__, ret);
+ }
}
-postcore_initcall(register_node_type);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5db704f02e71..1ee878d126fd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -636,6 +636,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
+ /*
+ * The children must be in their deepest (powered-off) states to allow
+ * the parent to be powered off. Note that there's no need for
+ * additional locking, as powering on a child requires the parent's
+ * lock to be acquired first.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return -EBUSY;
+ }
+
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -1073,6 +1085,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Check that the children are in their deepest (powered-off) state. */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return;
+ }
+
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
if (_genpd_power_off(genpd, false))
@@ -2058,9 +2077,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
kfree(link);
}
- genpd_debug_remove(genpd);
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
+ genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
@@ -2248,12 +2267,8 @@ int of_genpd_add_provider_simple(struct device_node *np,
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
/*
* Save table for faster processing while setting performance
@@ -2312,9 +2327,8 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
+ dev_err_probe(&genpd->dev, ret,
+ "Failed to add OPP table for index %d\n", i);
goto error;
}
@@ -2672,12 +2686,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
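The hunks above replace the open-coded "skip logging on -EPROBE_DEFER" pattern with dev_err_probe(). A minimal sketch of the resulting call shape, not taken from the patch; some_setup() is a hypothetical placeholder for the real operation and <linux/device.h> is assumed:

	static int example_attach(struct device *dev)
	{
		int ret = some_setup(dev);	/* placeholder, not a real kernel API */

		if (ret < 0)
			return dev_err_probe(dev, ret, "setup failed\n");
		return 0;
	}

dev_err_probe() demotes -EPROBE_DEFER to a debug message and returns the error code unchanged, which is why the explicit checks could be dropped.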
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92cbd9cf..c50139207794 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1855,7 +1855,7 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
@@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
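A minimal sketch of a caller after the signature change above, where suspend_report_result() now also takes the device so the message is printed via dev_err(); the callback body is illustrative only and <linux/pm.h> is assumed:

	static int example_suspend(struct device *dev)
	{
		int error = 0;	/* placeholder result of the real suspend work */

		suspend_report_result(dev, example_suspend, error);
		return error;
	}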
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2f3cce17219b..d4059e6ffeae 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
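A minimal usage sketch for the behaviour documented above: since the devres action now also calls pm_runtime_dont_use_autosuspend(), a probe routine that opts into autosuspend needs no matching cleanup on unbind. The 100 ms delay is illustrative and <linux/pm_runtime.h> is assumed:

	static int example_probe(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 100);
		pm_runtime_use_autosuspend(dev);
		return devm_pm_runtime_enable(dev);
	}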
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0004db4a9d3b..d487a6bac630 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
*
* Enables wakeirq conditionally. We need to enable wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 99bda0da23a8..a57d469676ca 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@@ -586,7 +587,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of the wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
@@ -732,7 +733,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
/*
* Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
+ * counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
@@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void)
atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
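A minimal sketch of reading back the wakeup interrupt with the pm_wakeup_irq() accessor introduced above; where this is called from and the pr_info() are illustrative, with <linux/suspend.h> assumed:

	static void report_wakeup_source(void)
	{
		unsigned int irq = pm_wakeup_irq();

		if (irq)
			pr_info("system resumed by IRQ %u\n", irq);
	}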
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b1905916f7af..b4df36c7b17d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -31,6 +31,7 @@ struct regmap_format {
size_t buf_size;
size_t reg_bytes;
size_t pad_bytes;
+ size_t reg_downshift;
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
@@ -62,6 +63,7 @@ struct regmap {
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
gfp_t alloc_flags;
+ unsigned int reg_base;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d2656581a608..400c7412a7dc 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret = regmap_write(map, reg, d->mask_buf[i]);
if (d->chip->clear_ack) {
if (d->chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- d->mask_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~d->mask_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -537,7 +535,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
/*
* Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
+ * there is no race between handling and acknowledging the
* interrupt. We assume that typically few of the interrupts
* will fire simultaneously so don't worry about overhead from
* doing a write per register.
@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- data->status_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~data->status_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] & d->mask_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- (d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~(d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -1053,7 +1045,7 @@ int devm_regmap_add_irq_chip_fwnode(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
* @dev: The device pointer on which irq_chip belongs to.
* @map: The regmap for the device.
@@ -1082,7 +1074,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
* devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
- * @dev: Device for which which resource was allocated.
+ * @dev: Device for which the resource was allocated.
* @irq: Primary IRQ for the device.
* @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
*
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8f9fe5fd4707..5e12f7cb5147 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -821,8 +821,11 @@ struct regmap *__regmap_init(struct device *dev,
else
map->alloc_flags = GFP_KERNEL;
+ map->reg_base = config->reg_base;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
+ map->format.reg_downshift = config->reg_downshift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
@@ -1735,6 +1738,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
@@ -1905,6 +1910,8 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
@@ -2346,6 +2353,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -2673,6 +2682,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
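A minimal sketch of a regmap_config exercising the two new fields wired up above (reg_base is added to the register first, then the result is shifted down by reg_downshift before being formatted); the device and the other values are hypothetical, with <linux/regmap.h> assumed:

	static const struct regmap_config example_regmap_cfg = {
		.reg_bits = 16,
		.val_bits = 32,
		.reg_base = 0x100,	/* offset added to every register */
		.reg_downshift = 2,	/* byte offsets -> 32-bit word index on the bus */
	};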
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fc24e89f9592..e9d1efcda89b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -14,11 +14,11 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_id_show_func(name) \
+#define define_id_show_func(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \
+ return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
}
#define define_siblings_read_func(name, mask) \
@@ -42,22 +42,25 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
off, count); \
}
-define_id_show_func(physical_package_id);
+define_id_show_func(physical_package_id, "%d");
static DEVICE_ATTR_RO(physical_package_id);
#ifdef TOPOLOGY_DIE_SYSFS
-define_id_show_func(die_id);
+define_id_show_func(die_id, "%d");
static DEVICE_ATTR_RO(die_id);
#endif
#ifdef TOPOLOGY_CLUSTER_SYSFS
-define_id_show_func(cluster_id);
+define_id_show_func(cluster_id, "%d");
static DEVICE_ATTR_RO(cluster_id);
#endif
-define_id_show_func(core_id);
+define_id_show_func(core_id, "%d");
static DEVICE_ATTR_RO(core_id);
+define_id_show_func(ppin, "0x%llx");
+static DEVICE_ATTR_ADMIN_RO(ppin);
+
define_siblings_read_func(thread_siblings, sibling_cpumask);
static BIN_ATTR_RO(thread_siblings, 0);
static BIN_ATTR_RO(thread_siblings_list, 0);
@@ -87,7 +90,7 @@ static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
#ifdef TOPOLOGY_BOOK_SYSFS
-define_id_show_func(book_id);
+define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
static BIN_ATTR_RO(book_siblings, 0);
@@ -95,7 +98,7 @@ static BIN_ATTR_RO(book_siblings_list, 0);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
-define_id_show_func(drawer_id);
+define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
static BIN_ATTR_RO(drawer_siblings, 0);
@@ -145,6 +148,7 @@ static struct attribute *default_attrs[] = {
#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
+ &dev_attr_ppin.attr,
NULL
};
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 52484bcdedb9..8a91fcac6f82 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,7 +12,6 @@
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
-#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6af111f568e4..384073ef2323 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -10,7 +10,6 @@
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
@@ -1019,9 +1018,9 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
iter.bi_size = cnt;
__bio_for_each_segment(bv, bio, iter, iter) {
- char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+ char *p = bvec_kmap_local(&bv);
skb_copy_bits(skb, soff, p, bv.bv_len);
- kunmap_atomic(p);
+ kunmap_local(p);
soff += bv.bv_len;
}
}
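A minimal sketch of the bvec_kmap_local()/kunmap_local() pairing used in the conversion above; the helper already accounts for bv_offset, so no manual offset arithmetic is needed. The zero-fill loop itself is illustrative, with <linux/bio.h> assumed:

	static void example_zero_bio(struct bio *bio)
	{
		struct bio_vec bv;
		struct bvec_iter iter;

		bio_for_each_segment(bv, bio, iter) {
			char *p = bvec_kmap_local(&bv);

			memset(p, 0, bv.bv_len);
			kunmap_local(p);
		}
	}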
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 72cf7603d51f..f5bcded3640d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
- bio_set_dev(bio, bdev->md_bdev);
+ bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
- bio_set_op_attrs(bio, op, op_flags);
if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
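A minimal sketch of the updated allocation interface used above: the target block device and the operation flags are now passed to bio_alloc_bioset() directly, replacing the separate bio_set_dev()/bio_set_op_attrs() calls. The bioset, page and sector are assumed to come from the caller, with <linux/bio.h> assumed:

	static void example_submit_page(struct block_device *bdev, struct page *page,
					struct bio_set *bs, sector_t sector)
	{
		struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
						   GFP_NOIO, bs);

		bio->bi_iter.bi_sector = sector;
		bio_add_page(bio, page, PAGE_SIZE, 0);
		submit_bio(bio);
	}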
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index c1f816f896a8..df25eecf80af 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
+ unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+ struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
+ GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
sector_t on_disk_sector =
device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
- bio_set_op_attrs(bio, op, 0);
if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f27d5b0f9a0b..4b55e864a0a3 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -27,7 +27,6 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
@@ -638,9 +637,6 @@ enum {
STATE_SENT, /* Do not change state/UUIDs while this is set */
CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
* pending, from drbd worker context.
- * If set, bdi_write_congested() returns true,
- * so shrink_page_list() would not recurse into,
- * and potentially deadlock on, this drbd worker.
*/
DISCONNECT_SENT,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6df2539e215b..fa00cf2ea952 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1279,16 +1279,16 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+ REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
- if (!bio || !octx) {
- drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
+
+ if (!octx) {
+ drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
/* FIXME: what else can I do now? disconnecting or detaching
* really does not help to improve the state of the world, either.
*/
- kfree(octx);
- if (bio)
- bio_put(bio);
+ bio_put(bio);
ctx->error = -ENOMEM;
put_ldev(device);
@@ -1298,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1646,7 +1644,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
unsigned data_size = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
- int err = -ENOMEM;
/* TRIM/DISCARD: for now, always use the helper function
* blkdev_issue_zeroout(..., discard=true).
@@ -1687,15 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(GFP_NOIO, nr_pages);
- if (!bio) {
- drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
- goto fail;
- }
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
+ GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, device->ldev->backing_bdev);
- bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1726,14 +1718,6 @@ next_bio:
drbd_submit_bio_noacct(device, fault_type, bio);
} while (bios);
return 0;
-
-fail:
- while (bios) {
- bio = bios;
- bios = bios->bi_next;
- bio_put(bio);
- }
- return err;
}
static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
@@ -2033,10 +2017,10 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
bio_for_each_segment(bvec, bio, iter) {
- void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+ void *mapped = bvec_kmap_local(&bvec);
expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
- kunmap(bvec.bv_page);
+ kunmap_local(mapped);
if (err)
return err;
data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3235532ae077..2f608525a879 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,7 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
+ req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
+ GFP_NOIO, &drbd_io_bio_set);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
@@ -909,8 +910,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- return bdi_read_congested(
- device->ldev->backing_bdev->bd_disk->bdi);
+ return 0;
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
@@ -1151,8 +1151,6 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio_set_dev(bio, device->ldev->backing_bdev);
-
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 64563bfdf0da..1b48c8172a07 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -326,9 +326,9 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
bio_for_each_segment(bvec, bio, iter) {
u8 *src;
- src = kmap_atomic(bvec.bv_page);
- crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
- kunmap_atomic(src);
+ src = bvec_kmap_local(&bvec);
+ crypto_shash_update(desc, src, bvec.bv_len);
+ kunmap_local(src);
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
@@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
- req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+ req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+ req->master_bio, GFP_NOIO,
&drbd_io_bio_set);
- bio_set_dev(req->private_bio, device->ldev->backing_bdev);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e611411a934c..8c647532e3ce 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2485,11 +2485,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
}
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
- memcpy_to_page(bv.bv_page, bv.bv_offset, dma_buffer,
- size);
+ memcpy_to_bvec(&bv, dma_buffer);
else
- memcpy_from_page(dma_buffer, bv.bv_page, bv.bv_offset,
- size);
+ memcpy_from_bvec(dma_buffer, &bv);
remaining -= size;
dma_buffer += size;
@@ -4129,15 +4127,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
- bio_set_op_attrs(&bio, REQ_OP_READ, 0);
init_completion(&cbdata.complete);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 01cbbfc4e9e2..3e636a75c83a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -79,12 +79,14 @@
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
+#include <linux/statfs.h>
#include "loop.h"
#include <linux/uaccess.h>
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
+#define LOOP_DEFAULT_HW_Q_DEPTH (128)
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
@@ -308,12 +310,11 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
- struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
- if (!blk_queue_discard(q)) {
+ if (!blk_queue_discard(lo->lo_queue)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -327,8 +328,7 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
- struct file *file = lo->lo_backing_file;
- int ret = vfs_fsync(file, 0);
+ int ret = vfs_fsync(lo->lo_backing_file, 0);
if (unlikely(ret && ret != -EINVAL))
ret = -EIO;
@@ -680,33 +680,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
- return sprintf(buf, "%s\n", autoclear ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}
static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
- return sprintf(buf, "%s\n", partscan ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
- return sprintf(buf, "%s\n", dio ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
LOOP_ATTR_RO(backing_file);
@@ -774,8 +774,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0;
} else {
+ struct kstatfs sbuf;
+
max_discard_sectors = UINT_MAX >> 9;
- granularity = inode->i_sb->s_blocksize;
+ if (!vfs_statfs(&file->f_path, &sbuf))
+ granularity = sbuf.f_bsize;
+ else
+ max_discard_sectors = 0;
}
if (max_discard_sectors) {
@@ -1082,7 +1087,7 @@ out_putf:
return error;
}
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
@@ -1144,6 +1149,8 @@ static void __loop_clr_fd(struct loop_device *lo)
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1151,52 +1158,44 @@ static void __loop_clr_fd(struct loop_device *lo)
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
- mutex_lock(&lo->lo_disk->open_mutex);
+ /*
+ * open_mutex is already held in the release path, so don't
+ * acquire it again if this function is called from there.
+ *
+ * If the partition reread isn't triggered from the release path,
+ * lo_refcnt must be at least one and it can only become zero when
+ * the current holder is released.
+ */
+ if (!release)
+ mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- mutex_unlock(&lo->lo_disk->open_mutex);
+ if (!release)
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
}
+ /*
+ * lo->lo_state is set to Lo_unbound here after the partition scan above
+ * has finished. There cannot be anybody else entering __loop_clr_fd() as
+ * Lo_rundown state protects us from all the other places trying to
+ * change the 'lo' device.
+ */
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
- fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
- module_put(THIS_MODULE);
-}
-static void loop_rundown_workfn(struct work_struct *work)
-{
- struct loop_device *lo = container_of(work, struct loop_device,
- rundown_work);
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
-
- __loop_clr_fd(lo);
- kobject_put(&bdev->bd_device.kobj);
- module_put(disk->fops->owner);
- loop_rundown_completed(lo);
-}
-
-static void loop_schedule_rundown(struct loop_device *lo)
-{
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
-
- __module_get(disk->fops->owner);
- kobject_get(&bdev->bd_device.kobj);
- INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
- queue_work(system_long_wq, &lo->rundown_work);
+ /*
+ * There is no need to hold lo_mutex to fput the backing file. Calling
+ * fput while holding lo_mutex triggers a possible circular lock
+ * dependency warning, as fput can take open_mutex, which is usually
+ * taken before lo_mutex.
+ */
+ fput(filp);
}
static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1227,7 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
- __loop_clr_fd(lo);
- loop_rundown_completed(lo);
+ __loop_clr_fd(lo, false);
return 0;
}
@@ -1262,7 +1260,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
/* If any pages were dirtied after invalidate_bdev(), try again */
err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
@@ -1482,7 +1480,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
/* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
@@ -1754,7 +1752,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- loop_schedule_rundown(lo);
+ __loop_clr_fd(lo, true);
return;
} else if (lo->lo_state == Lo_bound) {
/*
@@ -1787,6 +1785,24 @@ module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
+
+static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
+
+static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
+{
+ int ret = kstrtoint(s, 10, &hw_queue_depth);
+
+ return (ret || (hw_queue_depth < 1)) ? -EINVAL : 0;
+}
+
+static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
+ .set = loop_set_hw_queue_depth,
+ .get = param_get_int,
+};
+
+device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 128");
+
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1981,7 +1997,7 @@ static int loop_add(int i)
lo->tag_set.ops = &loop_mq_ops;
lo->tag_set.nr_hw_queues = 1;
- lo->tag_set.queue_depth = 128;
+ lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
@@ -2075,6 +2091,7 @@ static void loop_remove(struct loop_device *lo)
del_gendisk(lo->lo_disk);
blk_cleanup_disk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
+
mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
mutex_unlock(&loop_ctl_mutex);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 918a7a2dc025..082d4b6bfc6a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -56,7 +56,6 @@ struct loop_device {
struct gendisk *lo_disk;
struct mutex lo_mutex;
bool idr_visible;
- struct work_struct rundown_work;
};
struct loop_cmd {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index e6005c232328..4fbaf0b4958b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -19,7 +19,6 @@
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
@@ -161,9 +160,7 @@ static bool mtip_check_surprise_removal(struct driver_data *dd)
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
- struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
-
- return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
+ return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(dd->tags.tags[0], tag));
}
/*
@@ -4112,7 +4109,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_set_queue_dying(dd->queue);
+ blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 88f4206310e4..6816beb45352 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -15,7 +15,6 @@
#include <linux/rwsem.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
-#include <linux/genhd.h>
/* Offset of Subsystem Device ID in pci confoguration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 13004beb48ca..05b1120e6623 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -431,9 +431,10 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
return count;
- if (null_add_dev(dev)) {
+ ret = null_add_dev(dev);
+ if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- return -ENOMEM;
+ return ret;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -719,26 +720,25 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
return NULL;
}
-static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{
struct nullb_cmd *cmd;
DEFINE_WAIT(wait);
- cmd = __alloc_cmd(nq);
- if (cmd || !can_wait)
- return cmd;
-
do {
- prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+ /*
+ * This avoids multiple return statements, multiple calls to
+ * __alloc_cmd() and a fast path call to prepare_to_wait().
+ */
cmd = __alloc_cmd(nq);
- if (cmd)
- break;
-
+ if (cmd) {
+ cmd->bio = bio;
+ return cmd;
+ }
+ prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
io_schedule();
+ finish_wait(&nq->wait, &wait);
} while (1);
-
- finish_wait(&nq->wait, &wait);
- return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
@@ -777,24 +777,22 @@ static void null_complete_rq(struct request *rq)
end_cmd(blk_mq_rq_to_pdu(rq));
}
-static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+static struct nullb_page *null_alloc_page(void)
{
struct nullb_page *t_page;
- t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
if (!t_page)
- goto out;
+ return NULL;
- t_page->page = alloc_pages(gfp_flags, 0);
- if (!t_page->page)
- goto out_freepage;
+ t_page->page = alloc_pages(GFP_NOIO, 0);
+ if (!t_page->page) {
+ kfree(t_page);
+ return NULL;
+ }
memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
return t_page;
-out_freepage:
- kfree(t_page);
-out:
- return NULL;
}
static void null_free_page(struct nullb_page *t_page)
@@ -932,7 +930,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_unlock_irq(&nullb->lock);
- t_page = null_alloc_page(GFP_NOIO);
+ t_page = null_alloc_page();
if (!t_page)
goto out_lock;
@@ -1476,12 +1474,8 @@ static void null_submit_bio(struct bio *bio)
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
- struct nullb_cmd *cmd;
-
- cmd = alloc_cmd(nq, 1);
- cmd->bio = bio;
- null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
+ null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}
static bool should_timeout_request(struct request *rq)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2b6b70a39e76..e745fc29e55d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_reset(bio);
+ bio_reset(bio, pd->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- bio_reset(pkt->w_bio);
+ bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
@@ -2298,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
+ struct bio *cloned_bio =
+ bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
psd->bio = bio;
- bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2404,18 +2400,11 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
- struct pktcdvd_device *pd;
- char b[BDEVNAME_SIZE];
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct bio *split;
blk_queue_split(&bio);
- pd = bio->bi_bdev->bd_disk->queue->queuedata;
- if (!pd) {
- pr_err("%s incorrect request queue\n", bio_devname(bio, b));
- goto end_io;
- }
-
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
(unsigned long long)bio_end_sector(bio));
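A minimal sketch of the three-argument bio_reset() used above, which takes over the work of bio_set_dev() and bio_set_op_attrs() when recycling a long-lived bio; bio, bdev and sector are assumed to exist in the caller, with <linux/bio.h> assumed:

	static void example_recycle_bio(struct bio *bio, struct block_device *bdev,
					sector_t sector)
	{
		bio_reset(bio, bdev, REQ_OP_READ);
		bio->bi_iter.bi_sector = sector;
	}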
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4203cdab8abf..b844432bad20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
- blk_set_queue_dying(rbd_dev->disk->queue);
+ blk_mark_disk_dead(rbd_dev->disk);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c08971de369f..2f378684b735 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -23,7 +23,6 @@ MODULE_LICENSE("GPL");
static int rnbd_client_major;
static DEFINE_IDA(index_ida);
-static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);
@@ -55,9 +54,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
if (!refcount_dec_and_test(&dev->refcount))
return;
- mutex_lock(&ida_lock);
- ida_simple_remove(&index_ida, dev->clt_device_id);
- mutex_unlock(&ida_lock);
+ ida_free(&index_ida, dev->clt_device_id);
kfree(dev->hw_queues);
kfree(dev->pathname);
rnbd_clt_put_sess(dev->sess);
@@ -87,7 +84,6 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard);
- dev->rotational = rsp->rotational;
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->fua = !!(rsp->cache_policy & RNBD_FUA);
@@ -1262,9 +1258,9 @@ find_and_get_or_create_sess(const char *sessname,
struct rtrs_clt_ops rtrs_ops;
sess = find_or_create_sess(sessname, &first);
- if (sess == ERR_PTR(-ENOMEM))
+ if (sess == ERR_PTR(-ENOMEM)) {
return ERR_PTR(-ENOMEM);
- else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
+ } else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
/*
* A device MUST have its own session to use the polling-mode.
* It must fail to map new device with the same session.
@@ -1343,7 +1339,7 @@ static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
- int i;
+ unsigned long i;
struct blk_mq_hw_ctx *hctx;
struct rnbd_queue *q;
@@ -1410,8 +1406,10 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->read_only = false;
}
- if (!dev->rotational)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
+ /*
+ * A network device does not need the rotational flag
+ */
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
blk_cleanup_disk(dev->gd);
@@ -1459,10 +1457,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc;
}
- mutex_lock(&ida_lock);
- ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
- GFP_KERNEL);
- mutex_unlock(&ida_lock);
+ ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+ GFP_KERNEL);
if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret);
@@ -1610,13 +1606,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
+ dev->max_hw_sectors, dev->wc, dev->fua);
mutex_unlock(&dev->lock);
rnbd_clt_put_sess(sess);
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 0c2cae7f39b9..62bf7c3fa63c 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -118,7 +118,6 @@ struct rnbd_clt_dev {
enum rnbd_access_mode access_mode;
u32 nr_poll_queues;
bool read_only;
- bool rotational;
bool wc;
bool fua;
u32 max_hw_sectors;
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index de5d5a8df81d..c4a68b3a1cbe 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
* @logical_block_size: logical block size device supports in bytes
* @max_segments: max segments hardware support in one transfer
* @secure_discard: supports secure discard
- * @rotation: is a rotational disc?
+ * @obsolete_rotational: obsolete, no longer used.
* @cache_policy: support write-back caching or FUA?
*/
struct rnbd_msg_open_rsp {
@@ -144,7 +144,7 @@ struct rnbd_msg_open_rsp {
__le16 logical_block_size;
__le16 max_segments;
__le16 secure_discard;
- u8 rotational;
+ u8 obsolete_rotational;
u8 cache_policy;
u8 reserved[10];
};
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index b241a099aeae..c5d0a0391165 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -12,8 +12,7 @@
#include "rnbd-srv-dev.h"
#include "rnbd-log.h"
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs)
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
{
struct rnbd_dev *dev;
int ret;
@@ -30,7 +29,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
dev->blk_open_flags = flags;
bdevname(dev->bdev, dev->name);
- dev->ibd_bio_set = bs;
return dev;
@@ -44,60 +42,3 @@ void rnbd_dev_close(struct rnbd_dev *dev)
blkdev_put(dev->bdev, dev->blk_open_flags);
kfree(dev);
}
-
-void rnbd_dev_bi_end_io(struct bio *bio)
-{
- struct rnbd_dev_blk_io *io = bio->bi_private;
-
- rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
- bio_put(bio);
-}
-
-/**
- * rnbd_bio_map_kern - map kernel address into bio
- * @data: pointer to buffer to map
- * @bs: bio_set to use.
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask)
-{
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int offset, i;
- struct bio *bio;
-
- bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- offset = offset_in_page(kaddr);
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
-
- if (bio_add_page(bio, virt_to_page(data), bytes,
- offset) < bytes) {
- /* we don't support partial mappings */
- bio_put(bio);
- return ERR_PTR(-EINVAL);
- }
-
- data += bytes;
- len -= bytes;
- offset = 0;
- }
-
- return bio;
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 0eb23850afb9..2c3df02b5e8e 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -14,25 +14,16 @@
struct rnbd_dev {
struct block_device *bdev;
- struct bio_set *ibd_bio_set;
fmode_t blk_open_flags;
char name[BDEVNAME_SIZE];
};
-struct rnbd_dev_blk_io {
- struct rnbd_dev *dev;
- void *priv;
- /* have to be last member for front_pad usage of bioset_init */
- struct bio bio;
-};
-
/**
* rnbd_dev_open() - Open a device
+ * @path: path to open
* @flags: open flags
- * @bs: bio_set to use during block io,
*/
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs);
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
/**
* rnbd_dev_close() - Close a device
@@ -41,11 +32,6 @@ void rnbd_dev_close(struct rnbd_dev *dev);
void rnbd_endio(void *priv, int error);
-void rnbd_dev_bi_end_io(struct bio *bio);
-
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask);
-
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
return queue_max_segments(bdev_get_queue(dev->bdev));
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index 4db98e0e76f0..feaa76c5a342 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -13,7 +13,6 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
-#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 1ee808fc600c..6499efae5c43 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -114,6 +114,12 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
return sess_dev;
}
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rtrs_srv_op *id, void *data, u32 datalen,
const void *usr, size_t usrlen)
@@ -123,7 +129,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id;
int err;
- struct rnbd_dev_blk_io *io;
struct bio *bio;
short prio;
@@ -144,33 +149,29 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- /* Generate bio with pages pointing to the rdma buffer */
- bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
- goto sess_dev_put;
+ bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+ rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
+ if (bio_add_page(bio, virt_to_page(data), datalen,
+ offset_in_page(data)) != datalen) {
+ rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
+ err = -EINVAL;
+ goto bio_put;
}
- io = container_of(bio, struct rnbd_dev_blk_io, bio);
- io->dev = sess_dev->rnbd_dev;
- io->priv = priv;
-
bio->bi_end_io = rnbd_dev_bi_end_io;
- bio->bi_private = io;
- bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+ bio->bi_private = priv;
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
- bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
submit_bio(bio);
return 0;
-sess_dev_put:
+bio_put:
+ bio_put(bio);
rnbd_put_sess_dev(sess_dev);
err:
kfree(priv);
@@ -251,7 +252,6 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
out:
xa_destroy(&srv_sess->index_idr);
- bioset_exit(&srv_sess->sess_bio_set);
pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
@@ -280,16 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
return -ENOMEM;
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
- err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
- offsetof(struct rnbd_dev_blk_io, bio),
- BIOSET_NEED_BVECS);
- if (err) {
- pr_err("Allocating srv_session for path %s failed\n",
- pathname);
- kfree(srv_sess);
- return err;
- }
-
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
@@ -568,7 +558,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
- rsp->rotational = !blk_queue_nonrot(q);
rsp->cache_policy = 0;
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
rsp->cache_policy |= RNBD_WRITEBACK;
@@ -738,8 +727,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- rnbd_dev = rnbd_dev_open(full_path, open_flags,
- &srv_sess->sess_bio_set);
+ rnbd_dev = rnbd_dev_open(full_path, open_flags);
if (IS_ERR(rnbd_dev)) {
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
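For reference, a minimal standalone sketch of the bdev-aware bio allocation pattern the rnbd server switches to above: bio_alloc() now takes the target block_device and the operation flags up front, so the separate bio_set_dev()/bio_set_op_attrs() calls disappear, and with a non-zero vector count and GFP_KERNEL the allocation itself does not fail (which is also why the xen-blkback hunk further down drops its NULL check). The helper name submit_kernel_buf() and the single-page, physically contiguous buffer assumption are illustrative only, not part of the driver.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

/* Map one contiguous, at most page-sized kernel buffer and submit it. */
static int submit_kernel_buf(struct block_device *bdev, void *data,
                             unsigned int len, sector_t sector,
                             unsigned int opf, bio_end_io_t *end_io,
                             void *private)
{
        struct bio *bio;

        /* Device and operation flags are supplied at allocation time. */
        bio = bio_alloc(bdev, 1, opf, GFP_KERNEL);
        if (bio_add_page(bio, virt_to_page(data), len,
                         offset_in_page(data)) != len) {
                bio_put(bio);           /* partial mappings are not supported */
                return -EINVAL;
        }
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = end_io;
        bio->bi_private = private;
        submit_bio(bio);
        return 0;
}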
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index e5604bce123a..be2ae486d407 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -23,7 +23,6 @@ struct rnbd_srv_session {
struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
- struct bio_set sess_bio_set;
struct xarray index_idr;
/* List of struct rnbd_srv_sess_dev */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 146d85d80e0e..dd0a1a6fed29 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
-#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c443cd64fc9b..a8bcf3f664af 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -69,16 +69,6 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;
- /*
- * Tracks references from block_device_operations open/release and
- * virtio_driver probe/remove so this object can be freed once no
- * longer in use.
- */
- refcount_t refs;
-
- /* What host tells us, plus 2 for header & tailer. */
- unsigned int sg_elems;
-
/* Ida index - used to track minor number allocations. */
int index;
@@ -322,8 +312,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t status;
int err;
- BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
@@ -391,43 +379,6 @@ out:
return err;
}
-static void virtblk_get(struct virtio_blk *vblk)
-{
- refcount_inc(&vblk->refs);
-}
-
-static void virtblk_put(struct virtio_blk *vblk)
-{
- if (refcount_dec_and_test(&vblk->refs)) {
- ida_simple_remove(&vd_index_ida, vblk->index);
- mutex_destroy(&vblk->vdev_mutex);
- kfree(vblk);
- }
-}
-
-static int virtblk_open(struct block_device *bd, fmode_t mode)
-{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
- int ret = 0;
-
- mutex_lock(&vblk->vdev_mutex);
-
- if (vblk->vdev)
- virtblk_get(vblk);
- else
- ret = -ENXIO;
-
- mutex_unlock(&vblk->vdev_mutex);
- return ret;
-}
-
-static void virtblk_release(struct gendisk *disk, fmode_t mode)
-{
- struct virtio_blk *vblk = disk->private_data;
-
- virtblk_put(vblk);
-}
-
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -460,11 +411,19 @@ out:
return ret;
}
+static void virtblk_free_disk(struct gendisk *disk)
+{
+ struct virtio_blk *vblk = disk->private_data;
+
+ ida_simple_remove(&vd_index_ida, vblk->index);
+ mutex_destroy(&vblk->vdev_mutex);
+ kfree(vblk);
+}
+
static const struct block_device_operations virtblk_fops = {
- .owner = THIS_MODULE,
- .open = virtblk_open,
- .release = virtblk_release,
- .getgeo = virtblk_getgeo,
+ .owner = THIS_MODULE,
+ .getgeo = virtblk_getgeo,
+ .free_disk = virtblk_free_disk,
};
static int index_to_minor(int index)
@@ -783,20 +742,15 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Prevent integer overflows and honor max vq size */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
- /* We need extra sg elements at head and tail. */
- sg_elems += 2;
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
goto out_free_index;
}
- /* This reference is dropped in virtblk_remove(). */
- refcount_set(&vblk->refs, 1);
mutex_init(&vblk->vdev_mutex);
vblk->vdev = vdev;
- vblk->sg_elems = sg_elems;
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -853,7 +807,7 @@ static int virtblk_probe(struct virtio_device *vdev)
set_disk_ro(vblk->disk, 1);
/* We can handle whatever the host told us to handle. */
- blk_queue_max_segments(q, vblk->sg_elems-2);
+ blk_queue_max_segments(q, sg_elems);
/* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U);
@@ -925,9 +879,15 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&v);
+
+ /*
+ * max_discard_seg == 0 is out of spec but we always
+ * handled it.
+ */
+ if (!v)
+ v = sg_elems;
blk_queue_max_discard_segments(q,
- min_not_zero(v,
- MAX_DISCARD_SEGMENTS));
+ min(v, MAX_DISCARD_SEGMENTS));
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
@@ -970,7 +930,7 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
- blk_cleanup_disk(vblk->disk);
+ blk_cleanup_queue(vblk->disk->queue);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
@@ -986,7 +946,7 @@ static void virtblk_remove(struct virtio_device *vdev)
mutex_unlock(&vblk->vdev_mutex);
- virtblk_put(vblk);
+ put_disk(vblk->disk);
}
#ifdef CONFIG_PM_SLEEP
@@ -1060,7 +1020,7 @@ static struct virtio_driver virtio_blk = {
#endif
};
-static int __init init(void)
+static int __init virtio_blk_init(void)
{
int error;
@@ -1086,14 +1046,14 @@ out_destroy_workqueue:
return error;
}
-static void __exit fini(void)
+static void __exit virtio_blk_fini(void)
{
unregister_virtio_driver(&virtio_blk);
unregister_blkdev(major, "virtblk");
destroy_workqueue(virtblk_wq);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_blk_init);
+module_exit(virtio_blk_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
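A hedged sketch of the ->free_disk() pattern virtio-blk adopts above: instead of refcounting the driver structure from ->open()/->release(), the private data lives as long as the gendisk and is torn down by the block layer once the final put_disk() drops the last reference. The my_* names below are illustrative, not taken from the driver.

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_dev {
        struct mutex lock;
        int index;
};

/* Called by the block layer when the last reference to the gendisk is gone. */
static void my_free_disk(struct gendisk *disk)
{
        struct my_dev *dev = disk->private_data;

        mutex_destroy(&dev->lock);
        kfree(dev);
}

static const struct block_device_operations my_fops = {
        .owner     = THIS_MODULE,
        .free_disk = my_free_disk,
};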
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 14e452896d04..d1e26461a64e 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1326,16 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
+ bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
+ operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
- bio_set_op_attrs(bio, operation, operation_flags);
}
preq.sector_number += seg[i].nsec;
@@ -1345,15 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
if (!bio) {
BUG_ON(operation_flags != REQ_PREFLUSH);
- bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
+ bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio_set_op_attrs(bio, operation, operation_flags);
}
atomic_set(&pending_req->pendcnt, nbio);
@@ -1381,14 +1374,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
free_req(ring, pending_req);
msleep(1); /* back off a bit */
return -EIO;
-
- fail_put_bio:
- for (i = 0; i < nbio; i++)
- bio_put(biolist[i]);
- atomic_set(&pending_req->pendcnt, 1);
- __end_block_io_op(pending_req, BLK_STS_RESOURCE);
- msleep(1); /* back off a bit */
- return -EIO;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 62125fd4af4a..f09040435e2e 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/kthread.h>
+#include <linux/pagemap.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ccd0dd0c6b83..85fc550508cc 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1288,7 +1288,8 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+ free_pages_exact(rinfo->ring.sring,
+ info->nr_ring_pages * XEN_PAGE_SIZE);
rinfo->ring.sring = NULL;
if (rinfo->irq)
@@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
return BLKIF_RSP_OKAY;
}
-static bool blkif_completion(unsigned long *id,
- struct blkfront_ring_info *rinfo,
- struct blkif_response *bret)
+/*
+ * Return values:
+ * 1 response processed.
+ * 0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+ struct blkfront_ring_info *rinfo,
+ struct blkif_response *bret)
{
int i = 0;
struct scatterlist *sg;
@@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id,
/* Wait the second response if not yet here. */
if (s2->status < REQ_DONE)
- return false;
+ return 0;
bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < num_grant; i++) {
- if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+ if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
/*
* If the grant is still mapped by the backend (the
* backend has chosen to make this grant persistent)
* we add it at the head of the list, so it will be
* reused first.
*/
- if (!info->feature_persistent)
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
- s->grants_used[i]->gref);
+ if (!info->feature_persistent) {
+ pr_alert("backend has not unmapped grant: %u\n",
+ s->grants_used[i]->gref);
+ return -1;
+ }
list_add(&s->grants_used[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
/*
- * If the grant is not mapped by the backend we end the
- * foreign access and add it to the tail of the list,
- * so it will not be picked again unless we run out of
- * persistent grants.
+ * If the grant is not mapped by the backend we add it
+ * to the tail of the list, so it will not be picked
+ * again unless we run out of persistent grants.
*/
- gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
s->grants_used[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
}
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
- if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
- if (!info->feature_persistent)
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
- s->indirect_grants[i]->gref);
+ if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+ if (!info->feature_persistent) {
+ pr_alert("backend has not unmapped grant: %u\n",
+ s->indirect_grants[i]->gref);
+ return -1;
+ }
list_add(&s->indirect_grants[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
struct page *indirect_page;
- gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
/*
* Add the used indirect page back to the list of
* available pages for indirect grefs.
@@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
}
}
- return true;
+ return 1;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
}
if (bret.operation != BLKIF_OP_DISCARD) {
+ int ret;
+
/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
- if (!blkif_completion(&id, rinfo, &bret))
+ ret = blkif_completion(&id, rinfo, &bret);
+ if (!ret)
continue;
+ if (unlikely(ret < 0))
+ goto err;
}
if (add_id_to_freelist(rinfo, id)) {
@@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev,
for (i = 0; i < info->nr_ring_pages; i++)
rinfo->ring_ref[i] = GRANT_INVALID_REF;
- sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
- get_order(ring_size));
+ sring = alloc_pages_exact(ring_size, GFP_NOIO);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
@@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev,
err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
if (err < 0) {
- free_pages((unsigned long)sring, get_order(ring_size));
+ free_pages_exact(sring, ring_size);
rinfo->ring.sring = NULL;
goto fail;
}
@@ -2126,7 +2138,7 @@ static void blkfront_closing(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- blk_set_queue_dying(info->rq);
+ blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0);
for_each_rinfo(info, rinfo, i) {
@@ -2521,6 +2533,7 @@ static void purge_persistent_grants(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
+ LIST_HEAD(grants);
spin_lock_irqsave(&rinfo->ring_lock, flags);
@@ -2532,16 +2545,17 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
node) {
if (gnt_list_entry->gref == GRANT_INVALID_REF ||
- gnttab_query_foreign_access(gnt_list_entry->gref))
+ !gnttab_try_end_foreign_access(gnt_list_entry->gref))
continue;
list_del(&gnt_list_entry->node);
- gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
rinfo->persistent_gnts_c--;
gnt_list_entry->gref = GRANT_INVALID_REF;
- list_add_tail(&gnt_list_entry->node, &rinfo->grants);
+ list_add_tail(&gnt_list_entry->node, &grants);
}
+ list_splice_tail(&grants, &rinfo->grants);
+
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}
}
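The purge_persistent_grants() change above also introduces a scratch list: entries released under the lock are collected on a local list and spliced back in one step, so the list_for_each_entry_safe() walk can never revisit entries it has already re-queued. A generic sketch of that idiom follows; struct item and the ->done test are placeholders, not driver code.

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head node;
        bool done;
};

static void requeue_done_items(struct list_head *head)
{
        struct item *it, *tmp;
        LIST_HEAD(moved);

        list_for_each_entry_safe(it, tmp, head, node) {
                if (!it->done)
                        continue;
                /* Park the entry on a private list; the walk won't see it again. */
                list_move_tail(&it->node, &moved);
        }

        /* Re-append everything that was parked, preserving its order. */
        list_splice_tail(&moved, head);
}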
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index cb253d80d72b..e9474b02012d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -22,7 +22,6 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
@@ -617,24 +616,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
{
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
+ GFP_NOIO);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- bio_set_dev(bio, zram->bdev);
if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bio_put(bio);
return -EIO;
}
- if (!parent) {
- bio->bi_opf = REQ_OP_READ;
+ if (!parent)
bio->bi_end_io = zram_page_end_io;
- } else {
- bio->bi_opf = parent->bi_opf;
+ else
bio_chain(bio, parent);
- }
submit_bio(bio);
return 1;
@@ -747,10 +743,9 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, zram->bdev);
+ bio_init(&bio, zram->bdev, &bio_vec, 1,
+ REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
bvec.bv_offset);
@@ -1336,12 +1331,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
goto out;
if (is_partial_io(bvec)) {
- void *dst = kmap_atomic(bvec->bv_page);
void *src = kmap_atomic(page);
- memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+ memcpy_to_bvec(bvec, src + offset);
kunmap_atomic(src);
- kunmap_atomic(dst);
}
out:
if (is_partial_io(bvec))
@@ -1472,7 +1465,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret;
struct page *page = NULL;
- void *src;
struct bio_vec vec;
vec = *bvec;
@@ -1490,11 +1482,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (ret)
goto out;
- src = kmap_atomic(bvec->bv_page);
dst = kmap_atomic(page);
- memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+ memcpy_from_bvec(dst + offset, bvec);
kunmap_atomic(dst);
- kunmap_atomic(src);
vec.bv_page = page;
vec.bv_len = PAGE_SIZE;
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 3a258a677df8..b79895810c52 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
.config = &modem_mv31_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
};
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index fd87a59837fa..5eb0fe73ddc4 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi)
return 0;
}
-static int moxtet_remove(struct spi_device *spi)
+static void moxtet_remove(struct spi_device *spi)
{
struct moxtet *moxtet = spi_get_drvdata(spi);
@@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi)
device_for_each_child(moxtet->dev, NULL, __unregister);
mutex_destroy(&moxtet->lock);
-
- return 0;
}
static const struct of_device_id moxtet_dt_ids[] = {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index faead41709bc..8e78b37d0f6a 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/cdrom.h>
-#include <linux/genhd.h>
#include <linux/bio.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9704963f9d50..a087156a5818 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -401,7 +401,7 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && ARM64
+ depends on HW_RANDOM && PCI && ARCH_THUNDER
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4317a5..b8effe77d80f 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -13,13 +13,16 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define TRNG_CR 0x00
#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
+#define TRNG_ISR_DATRDY BIT(0)
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
@@ -34,37 +37,79 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ bool has_half_rate;
};
+static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait)
+{
+ int ready;
+
+ ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY;
+ if (!ready && wait)
+ readl_poll_timeout(trng->base + TRNG_ISR, ready,
+ ready & TRNG_ISR_DATRDY, 1000, 20000);
+
+ return !!ready;
+}
+
static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
u32 *data = buf;
+ int ret;
- /* data ready? */
- if (readl(trng->base + TRNG_ISR) & 1) {
- *data = readl(trng->base + TRNG_ODATA);
- /*
- ensure data ready is only set again AFTER the next data
- word is ready in case it got set between checking ISR
- and reading ODATA, so we don't risk re-reading the
- same word
- */
- readl(trng->base + TRNG_ISR);
- return 4;
- } else
- return 0;
+ ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ if (ret < 0) {
+ pm_runtime_put_sync((struct device *)trng->rng.priv);
+ return ret;
+ }
+
+ ret = atmel_trng_wait_ready(trng, wait);
+ if (!ret)
+ goto out;
+
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ * ensure data ready is only set again AFTER the next data word is ready
+ * in case it got set between checking ISR and reading ODATA, so we
+ * don't risk re-reading the same word
+ */
+ readl(trng->base + TRNG_ISR);
+ ret = 4;
+
+out:
+ pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ return ret;
}
-static void atmel_trng_enable(struct atmel_trng *trng)
+static int atmel_trng_init(struct atmel_trng *trng)
{
+ unsigned long rate;
+ int ret;
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ if (trng->has_half_rate) {
+ rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
+
writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+
+ return 0;
}
-static void atmel_trng_disable(struct atmel_trng *trng)
+static void atmel_trng_cleanup(struct atmel_trng *trng)
{
writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable_unprepare(trng->clk);
}
static int atmel_trng_probe(struct platform_device *pdev)
@@ -88,32 +133,31 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- if (data->has_half_rate) {
- unsigned long rate = clk_get_rate(trng->clk);
-
- /* if peripheral clk is above 100MHz, set HALFR */
- if (rate > 100000000)
- writel(TRNG_HALFR, trng->base + TRNG_MR);
- }
-
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
+ trng->has_half_rate = data->has_half_rate;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
+ trng->rng.priv = (unsigned long)&pdev->dev;
+ platform_set_drvdata(pdev, trng);
- ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+#ifndef CONFIG_PM
+ ret = atmel_trng_init(trng);
if (ret)
- goto err_register;
+ return ret;
+#endif
- platform_set_drvdata(pdev, trng);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- return 0;
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#ifndef CONFIG_PM
+ atmel_trng_cleanup(trng);
+#endif
+ }
-err_register:
- clk_disable_unprepare(trng->clk);
return ret;
}
@@ -121,43 +165,35 @@ static int atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
-
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int atmel_trng_suspend(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
return 0;
}
-static int atmel_trng_resume(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
-
- return 0;
+ return atmel_trng_init(trng);
}
-static const struct dev_pm_ops atmel_trng_pm_ops = {
- .suspend = atmel_trng_suspend,
- .resume = atmel_trng_resume,
+static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend,
+ atmel_trng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#endif /* CONFIG_PM */
static const struct atmel_trng_data at91sam9g45_config = {
.has_half_rate = false,
@@ -185,9 +221,7 @@ static struct platform_driver atmel_trng_driver = {
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
-#ifdef CONFIG_PM
- .pm = &atmel_trng_pm_ops,
-#endif /* CONFIG_PM */
+ .pm = pm_ptr(&atmel_trng_pm_ops),
.of_match_table = atmel_trng_dt_ids,
},
};
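The reworked atmel_trng_read() above brackets each hardware access with runtime-PM get/put calls and lets the device autosuspend shortly after the last read. A stripped-down sketch of that idiom, with the actual register access elided and pm_wrapped_read() as a hypothetical helper:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int pm_wrapped_read(struct device *dev, u32 *out)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume the device if needed */
        if (ret < 0) {
                pm_runtime_put_sync(dev);       /* drop the usage count taken above */
                return ret;
        }

        *out = 0;                               /* ... read the hardware here ... */
        ret = sizeof(*out);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_sync_autosuspend(dev);   /* allow autosuspend after the delay */
        return ret;
}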
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 6f66919652bf..7c55f4cf4a8b 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -179,7 +179,7 @@ static int cavium_map_pf_regs(struct cavium_rng *rng)
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_CAVIUM_RNG_PF, NULL);
if (!pdev) {
- dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+ pr_err("Cannot find RNG PF device\n");
return -EIO;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3db27916256..16f227b995e8 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
@@ -31,7 +32,7 @@ static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
-/* list of registered rngs, sorted decending by quality */
+/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -44,14 +45,14 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per 1024 bits of input");
+ "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
"default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void start_khwrngd(void);
+static void hwrng_manage_rngd(struct hwrng *rng);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -64,13 +65,12 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
int bytes_read;
- size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(rng_buffer, bytes_read);
+ add_device_randomness(rng_fillbuf, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
@@ -161,14 +161,13 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- current_quality = rng->quality ? : default_quality;
- if (current_quality > 1024)
- current_quality = 1024;
+ if (!rng->quality)
+ rng->quality = default_quality;
+ if (rng->quality > 1024)
+ rng->quality = 1024;
+ current_quality = rng->quality; /* obsolete */
- if (current_quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (current_quality > 0 && !hwrng_fill)
- start_khwrngd();
+ hwrng_manage_rngd(rng);
return 0;
}
@@ -298,24 +297,28 @@ static struct miscdevice rng_miscdev = {
static int enable_best_rng(void)
{
+ struct hwrng *rng, *new_rng = NULL;
int ret = -ENODEV;
BUG_ON(!mutex_is_locked(&rng_mutex));
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
- if (!ret)
- cur_rng_set_by_user = 0;
- } else {
+ /* no rng to use? */
+ if (list_empty(&rng_list)) {
drop_current_rng();
cur_rng_set_by_user = 0;
- ret = 0;
+ return 0;
+ }
+
+ /* use the rng which offers the best quality */
+ list_for_each_entry(rng, &rng_list, list) {
+ if (!new_rng || rng->quality > new_rng->quality)
+ new_rng = rng;
}
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+
return ret;
}
@@ -336,8 +339,9 @@ static ssize_t rng_current_store(struct device *dev,
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
- cur_rng_set_by_user = 1;
err = set_current_rng(rng);
+ if (!err)
+ cur_rng_set_by_user = 1;
break;
}
}
@@ -399,14 +403,76 @@ static ssize_t rng_selected_show(struct device *dev,
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
+static ssize_t rng_quality_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ if (!rng) /* no need to put_rng */
+ return -ENODEV;
+
+ ret = sysfs_emit(buf, "%hu\n", rng->quality);
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_quality_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ u16 quality;
+ int ret = -EINVAL;
+
+ if (len < 2)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rng_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ ret = kstrtou16(buf, 0, &quality);
+ if (ret || quality > 1024) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!current_rng) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ current_rng->quality = quality;
+ current_quality = quality; /* obsolete */
+
+ /* the best available RNG may have changed */
+ ret = enable_best_rng();
+
+ /* start/stop rngd if necessary */
+ if (current_rng)
+ hwrng_manage_rngd(current_rng);
+
+out:
+ mutex_unlock(&rng_mutex);
+ return ret ? ret : len;
+}
+
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
+static DEVICE_ATTR_RW(rng_quality);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
&dev_attr_rng_selected.attr,
+ &dev_attr_rng_quality.attr,
NULL
};
@@ -424,9 +490,11 @@ static int __init register_miscdev(void)
static int hwrng_fillfn(void *unused)
{
+ size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
long rc;
while (!kthread_should_stop()) {
+ unsigned short quality;
struct hwrng *rng;
rng = get_current_rng();
@@ -435,27 +503,49 @@ static int hwrng_fillfn(void *unused)
mutex_lock(&reading_mutex);
rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
+ if (current_quality != rng->quality)
+ rng->quality = current_quality; /* obsolete */
+ quality = rng->quality;
mutex_unlock(&reading_mutex);
put_rng(rng);
+
+ if (!quality)
+ break;
+
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+
+ /* If we cannot credit at least one bit of entropy,
+ * keep track of the remainder for the next iteration
+ */
+ entropy = rc * quality * 8 + entropy_credit;
+ if ((entropy >> 10) == 0)
+ entropy_credit = entropy;
+
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
- rc * current_quality * 8 >> 10);
+ entropy >> 10);
}
hwrng_fill = NULL;
return 0;
}
-static void start_khwrngd(void)
+static void hwrng_manage_rngd(struct hwrng *rng)
{
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
+ if (WARN_ON(!mutex_is_locked(&rng_mutex)))
+ return;
+
+ if (rng->quality == 0 && hwrng_fill)
+ kthread_stop(hwrng_fill);
+ if (rng->quality > 0 && !hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
}
}
@@ -463,7 +553,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- struct list_head *rng_list_ptr;
bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
@@ -477,18 +566,11 @@ int hwrng_register(struct hwrng *rng)
if (strcmp(tmp->name, rng->name) == 0)
goto out_unlock;
}
+ list_add_tail(&rng->list, &rng_list);
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
- /* rng_list is sorted by decreasing quality */
- list_for_each(rng_list_ptr, &rng_list) {
- tmp = list_entry(rng_list_ptr, struct hwrng, list);
- if (tmp->quality < rng->quality)
- break;
- }
- list_add_tail(&rng->list, rng_list_ptr);
-
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
@@ -638,7 +720,7 @@ static void __exit hwrng_modexit(void)
unregister_miscdev();
}
-module_init(hwrng_modinit);
+fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
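The new accounting in hwrng_fillfn() above works in 1/1024ths of a bit: rc bytes read from a source of the given quality are worth rc * 8 * quality / 1024 bits (for example, a 32-byte read at quality 700 credits 32 * 8 * 700 / 1024 = 175 bits), and when that rounds down to zero the whole amount is carried into the next iteration instead of being dropped. A small standalone restatement of that arithmetic; credit_entropy() is a hypothetical helper, not part of the driver.

/* quality is entropy per 1024 bits of input; *credit is in 1/1024-bit units. */
static size_t credit_entropy(size_t rc_bytes, unsigned short quality,
                             size_t *credit)
{
        size_t entropy = rc_bytes * 8 * quality + *credit;

        /* Less than one whole bit gathered: carry everything forward. */
        if ((entropy >> 10) == 0)
                *credit = entropy;

        return entropy >> 10;           /* whole bits to credit right now */
}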
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 67947a19aa22..e8f9621e7954 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
out_clk:
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 68613f0b6887..66ce7c03a142 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,320 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * random.c -- A strong random number generator
- *
* Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
- *
- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
- * rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * (now, with legal B.S. out of the way.....)
- *
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic use.
- * Besides the obvious cryptographic uses, these numbers are also good
- * for seeding TCP sequence numbers, and other places where it is
- * desirable to have numbers which are not only random, but hard to
- * predict by an attacker.
- *
- * Theory of operation
- * ===================
- *
- * Computers are very predictable devices. Hence it is extremely hard
- * to produce truly random numbers on a computer --- as opposed to
- * pseudo-random numbers, which can easily generated by using a
- * algorithm. Unfortunately, it is very easy for attackers to guess
- * the sequence of pseudo-random number generators, and for some
- * applications this is not acceptable. So instead, we must try to
- * gather "environmental noise" from the computer's environment, which
- * must be hard for outside attackers to observe, and use that to
- * generate random numbers. In a Unix environment, this is best done
- * from inside the kernel.
- *
- * Sources of randomness from the environment include inter-keyboard
- * timings, inter-interrupt timings from some interrupts, and other
- * events which are both (a) non-deterministic and (b) hard for an
- * outside observer to measure. Randomness from these sources are
- * added to an "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the BLAKE2s
- * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of BLAKE2s from its output. Even if it is possible to
- * analyze BLAKE2s in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
- * bits of "true randomness" are contained in the entropy pool as it
- * outputs random numbers.
- *
- * If this estimate goes to zero, the routine can still generate
- * random numbers; however, an attacker may (at least in theory) be
- * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of BLAKE2s, which is
- * not believed to be feasible, but there is a remote possibility.
- * Nonetheless, these numbers should be useful for the vast majority
- * of purposes.
- *
- * Exported interfaces ---- output
- * ===============================
- *
- * There are four exported interfaces; two for use within the kernel,
- * and two for use from userspace.
- *
- * Exported interfaces ---- userspace output
- * -----------------------------------------
- *
- * The userspace interfaces are two character devices /dev/random and
- * /dev/urandom. /dev/random is suitable for use when very high
- * quality randomness is desired (for example, for key generation or
- * one-time pads), as it will only return a maximum of the number of
- * bits of randomness (as estimated by the random number generator)
- * contained in the entropy pool.
- *
- * The /dev/urandom device does not have this limit, and will return
- * as many bytes as are requested. As more and more random bytes are
- * requested without giving time for the entropy pool to recharge,
- * this will result in random numbers that are merely cryptographically
- * strong. For many applications, however, this is acceptable.
- *
- * Exported interfaces ---- kernel output
- * --------------------------------------
- *
- * The primary kernel interface is
- *
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer. This is equivalent to a
- * read from /dev/urandom.
- *
- * For less critical applications, there are the functions:
- *
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
- *
- * These are produced by a cryptographic RNG seeded from get_random_bytes,
- * and so do not deplete the entropy pool as much. These are recommended
- * for most in-kernel operations *if the result is going to be stored in
- * the kernel*.
- *
- * Specifically, the get_random_int() family do not attempt to do
- * "anti-backtracking". If you capture the state of the kernel (e.g.
- * by snapshotting the VM), you can figure out previous get_random_int()
- * return values. But if the value is stored in the kernel anyway,
- * this is not a problem.
- *
- * It *is* safe to expose get_random_int() output to attackers (e.g. as
- * network cookies); given outputs 1..n, it's not feasible to predict
- * outputs 0 or n+1. The only concern is an attacker who breaks into
- * the kernel later; the get_random_int() engine is not reseeded as
- * often as the get_random_bytes() one.
- *
- * get_random_bytes() is needed for keys that need to stay secret after
- * they are erased from the kernel. For example, any key that will
- * be wrapped and stored encrypted. And session encryption keys: we'd
- * like to know that after the session is closed and the keys erased,
- * the plaintext is unrecoverable to someone who recorded the ciphertext.
- *
- * But for network ports/cookies, stack canaries, PRNG seeds, address
- * space layout randomization, session *authentication* keys, or other
- * applications where the sensitive data is stored in the kernel in
- * plaintext for as long as it's sensitive, the get_random_int() family
- * is just fine.
- *
- * Consider ASLR. We want to keep the address space secret from an
- * outside attacker while the process is running, but once the address
- * space is torn down, it's of no use to an attacker any more. And it's
- * stored in kernel data structures as long as it's alive, so worrying
- * about an attacker's ability to extrapolate it from the get_random_int()
- * CRNG is silly.
- *
- * Even some cryptographic keys are safe to generate with get_random_int().
- * In particular, keys for SipHash are generally fine. Here, knowledge
- * of the key authorizes you to do something to a kernel object (inject
- * packets to a network connection, or flood a hash table), and the
- * key is stored with the object being protected. Once it goes away,
- * we no longer care if anyone knows the key.
- *
- * prandom_u32()
- * -------------
- *
- * For even weaker applications, see the pseudorandom generator
- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
- * numbers aren't security-critical at all, these are *far* cheaper.
- * Useful for self-tests, random error simulation, randomized backoffs,
- * and any other application where you trust that nobody is trying to
- * maliciously mess with you by guessing the "random" numbers.
- *
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
- * void add_interrupt_randomness(int irq);
- * void add_disk_randomness(struct gendisk *disk);
- * void add_hwgenerator_randomness(const char *buffer, size_t count,
- * size_t entropy);
- * void add_bootloader_randomness(const void *buf, unsigned int size);
- *
- * add_device_randomness() is for adding data to the random pool that
- * is likely to differ between two devices (or possibly even per boot).
- * This would be things like MAC addresses or serial numbers, or the
- * read-out of the RTC. This does *not* add any actual entropy to the
- * pool, but it initializes the pool to different values for devices
- * that might otherwise be identical and have very little entropy
- * available to them (particularly common in the embedded world).
- *
- * add_input_randomness() uses the input layer interrupt timing, as well as
- * the event type information from the hardware.
- *
- * add_interrupt_randomness() uses the interrupt timing as random
- * inputs to the entropy pool. Using the cycle counters and the irq source
- * as inputs, it feeds the randomness roughly once a second.
- *
- * add_disk_randomness() uses what amounts to the seek time of block
- * layer request events, on a per-disk_devt basis, as input to the
- * entropy pool. Note that high-speed solid state drives with very low
- * seek times do not make for good sources of entropy, as their seek
- * times are usually fairly consistent.
- *
- * All of these routines try to estimate how many bits of randomness a
- * particular randomness source. They do this by keeping track of the
- * first and second order deltas of the event timings.
- *
- * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
- * entropy as specified by the caller. If the entropy pool is full it will
- * block until more entropy is needed.
- *
- * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
- * add_device_randomness(), depending on whether or not the configuration
- * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
- *
- * Ensuring unpredictability at system startup
- * ============================================
- *
- * When any operating system starts up, it will go through a sequence
- * of actions that are fairly predictable by an adversary, especially
- * if the start-up does not involve interaction with a human operator.
- * This reduces the actual number of bits of unpredictability in the
- * entropy pool below the value in entropy_count. In order to
- * counteract this effect, it helps to carry information in the
- * entropy pool across shut-downs and start-ups. To do this, put the
- * following lines an appropriate script which is run during the boot
- * sequence:
- *
- * echo "Initializing random number generator..."
- * random_seed=/var/run/random-seed
- * # Carry a random seed from start-up to start-up
- * # Load and then save the whole entropy pool
- * if [ -f $random_seed ]; then
- * cat $random_seed >/dev/urandom
- * else
- * touch $random_seed
- * fi
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * and the following lines in an appropriate script which is run as
- * the system is shutdown:
- *
- * # Carry a random seed from shut-down to start-up
- * # Save the whole entropy pool
- * echo "Saving random seed..."
- * random_seed=/var/run/random-seed
- * touch $random_seed
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * For example, on most modern systems using the System V init
- * scripts, such code fragments would be found in
- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
- *
- * Effectively, these commands cause the contents of the entropy pool
- * to be saved at shut-down time and reloaded into the entropy pool at
- * start-up. (The 'dd' in the addition to the bootup script is to
- * make sure that /etc/random-seed is different for every start-up,
- * even if the system crashes without executing rc.0.) Even with
- * complete knowledge of the start-up activities, predicting the state
- * of the entropy pool requires knowledge of the previous history of
- * the system.
- *
- * Configuring the /dev/random driver under Linux
- * ==============================================
- *
- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
- * the /dev/mem major number (#1). So if your system does not have
- * /dev/random and /dev/urandom created already, they can be created
- * by using the commands:
- *
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
- *
- * Acknowledgements:
- * =================
- *
- * Ideas for constructing this random number generator were derived
- * from Pretty Good Privacy's random number generator, and from private
- * discussions with Phil Karn. Colin Plumb provided a faster random
- * number generator, which speed up the mixing function of the entropy
- * pool, taken from PGPfone. Dale Worley has also contributed many
- * useful ideas and suggestions to improve this driver.
- *
- * Any flaws in the design are solely my responsibility, and should
- * not be attributed to the Phil, Colin, or any of authors of PGP.
- *
- * Further background information on this topic may be obtained from
- * RFC 1750, "Randomness Recommendations for Security", by Donald
- * Eastlake, Steve Crocker, and Jeff Schiller.
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
+ *
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
+ *
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Some of that data is then "credited" as
+ * having a certain number of bits of entropy. When enough bits of entropy are
+ * available, the hash is finalized and handed as a key to a stream cipher that
+ * expands it indefinitely for various consumers. This key is periodically
+ * refreshed as the various entropy collectors, described below, add data to the
+ * input pool and credit it. There is currently no Fortuna-like scheduler
+ * involved, which can lead to malicious entropy sources causing a premature
+ * reseed, and the entropy estimates are, at best, conservative guesses.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
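The rewritten header comment above summarizes the design: inputs are hashed into a single pool, entropy is credited conservatively, and once enough has accumulated the hash is finalized into a key for a stream cipher that expands it for consumers. The following is a deliberately simplified illustration of that flow only, not the driver's implementation; the toy_* names and TOY_READY_BITS threshold are invented for the sketch, while the BLAKE2s calls are the kernel's library API.

#include <crypto/blake2s.h>
#include <linux/types.h>

struct toy_pool {
        struct blake2s_state hash;      /* incremental hash of all inputs */
        unsigned int entropy_bits;      /* credited, conservative estimate */
};

#define TOY_READY_BITS 256              /* illustrative readiness threshold */

static bool toy_mix_and_maybe_key(struct toy_pool *pool, const void *buf,
                                  size_t len, unsigned int credit_bits,
                                  u8 key[BLAKE2S_HASH_SIZE])
{
        blake2s_update(&pool->hash, buf, len);  /* hash the input into the pool */
        pool->entropy_bits += credit_bits;      /* credit the claimed entropy */

        if (pool->entropy_bits < TOY_READY_BITS)
                return false;                   /* keep collecting */

        blake2s_final(&pool->hash, key);        /* finalize into a cipher key */
        blake2s_init(&pool->hash, BLAKE2S_HASH_SIZE);
        pool->entropy_bits = 0;
        return true;                            /* caller hands key to its stream cipher */
}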
@@ -330,7 +38,7 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
@@ -344,744 +52,947 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <linux/uaccess.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
-
#include <asm/processor.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/random.h>
-
-/* #define ADD_INTERRUPT_BENCH */
-
-/*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
-static int random_write_wakeup_bits = 28 * (1 << 5);
-
-/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
+/*********************************************************************
*
- * Thanks to Colin Plumb for suggesting this.
+ * Initialization and readiness waiting.
*
- * The mixing operation is much less sensitive than the output hash,
- * where we use BLAKE2s. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
*
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
- */
-enum poolinfo {
- POOL_WORDS = 128,
- POOL_WORDMASK = POOL_WORDS - 1,
- POOL_BYTES = POOL_WORDS * sizeof(u32),
- POOL_BITS = POOL_BYTES * 8,
- POOL_BITSHIFT = ilog2(POOL_BITS),
-
- /* To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits. */
- POOL_ENTROPY_SHIFT = 3,
-#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
- POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
-
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- POOL_TAP1 = 104,
- POOL_TAP2 = 76,
- POOL_TAP3 = 51,
- POOL_TAP4 = 25,
- POOL_TAP5 = 1,
-
- EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
-};
-
-/*
- * Static global variables
- */
-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static struct fasync_struct *fasync;
-
-static DEFINE_SPINLOCK(random_ready_list_lock);
-static LIST_HEAD(random_ready_list);
-
-struct crng_state {
- u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
-};
-
-static struct crng_state primary_crng = {
- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
- .state[0] = CHACHA_CONSTANT_EXPA,
- .state[1] = CHACHA_CONSTANT_ND_3,
- .state[2] = CHACHA_CONSTANT_2_BY,
- .state[3] = CHACHA_CONSTANT_TE_K,
-};
+ *********************************************************************/
/*
* crng_init = 0 --> Uninitialized
* 1 --> Initialized
* 2 --> Initialized from input_pool
*
- * crng_init is protected by primary_crng->lock, and only increases
+ * crng_init is protected by base_crng->lock, and only increases
* its value (from 0->1->2).
*/
static int crng_init = 0;
-static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
-static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
- u8 tmp[CHACHA_BLOCK_SIZE], int used);
-static void process_random_ready_list(void);
-static void _get_random_bytes(void *buf, int nbytes);
+/* Various types of waiters for crng_init->2 transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
+static DEFINE_SPINLOCK(random_ready_chain_lock);
+static RAW_NOTIFIER_HEAD(random_ready_chain);
+/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
-
static int ratelimit_disable __read_mostly;
-
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
-/**********************************************************************
- *
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
+/*
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
+ * u64,int,long} family of functions.
*
- **********************************************************************/
-
-static u32 input_pool_data[POOL_WORDS] __latent_entropy;
-
-static struct {
- spinlock_t lock;
- u16 add_ptr;
- u16 input_rotate;
- int entropy_count;
-} input_pool = {
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
-};
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
-static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
-static ssize_t _extract_entropy(void *buf, size_t nbytes);
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
-static void crng_reseed(struct crng_state *crng, bool use_input_pool);
+/*
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
-static const u32 twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
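For reference, a minimal caller sketch (illustrative only, not part of this patch; the
generate_session_key() helper is hypothetical) of the wait-then-read pattern described above:

#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical helper: block (interruptibly) until seeded, then draw key material. */
static int generate_session_key(u8 *key, size_t len)
{
	int ret = wait_for_random_bytes();	/* 0, or -ERESTARTSYS on signal */

	if (ret)
		return ret;
	get_random_bytes(key, len);		/* safe: the pool is now seeded */
	return 0;
}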
/*
- * This function adds bytes into the entropy "pool". It does not
- * update the entropy estimate. The caller should call
- * credit_entropy_bits if this is appropriate.
+ * Add a callback function that will be invoked when the input
+ * pool is initialised.
*
- * The pool is stirred with a primitive polynomial of the appropriate
- * degree, and then twisted. We twist by three bits at a time because
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
*/
-static void _mix_pool_bytes(const void *in, int nbytes)
+int register_random_ready_notifier(struct notifier_block *nb)
{
- unsigned long i;
- int input_rotate;
- const u8 *bytes = in;
- u32 w;
-
- input_rotate = input_pool.input_rotate;
- i = input_pool.add_ptr;
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
- i = (i - 1) & POOL_WORDMASK;
-
- /* XOR in the various taps */
- w ^= input_pool_data[i];
- w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
-
- /* Mix the result back in with a twist */
- input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
+ unsigned long flags;
+ int ret = -EALREADY;
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
+ if (crng_ready())
+ return ret;
- input_pool.input_rotate = input_rotate;
- input_pool.add_ptr = i;
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ if (!crng_ready())
+ ret = raw_notifier_chain_register(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
}
-static void __mix_pool_bytes(const void *in, int nbytes)
+/*
+ * Delete a previously registered readiness callback function.
+ */
+int unregister_random_ready_notifier(struct notifier_block *nb)
{
- trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
- _mix_pool_bytes(in, nbytes);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
}
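As a hedged illustration of the notifier pair above (not part of this patch; my_rng_ready()
and my_driver_init() are hypothetical names), a driver that must defer key generation until
the pool is seeded might do:

#include <linux/notifier.h>
#include <linux/random.h>

static int my_rng_ready(struct notifier_block *nb, unsigned long action, void *data)
{
	/* Runs once crng_init reaches 2; safe to mint long-lived keys here. */
	return NOTIFY_DONE;
}

static struct notifier_block my_rng_nb = { .notifier_call = my_rng_ready };

static void my_driver_init(void)
{
	/* -EALREADY means the pool was already seeded before we registered. */
	if (register_random_ready_notifier(&my_rng_nb) == -EALREADY)
		my_rng_ready(&my_rng_nb, 0, NULL);
}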
-static void mix_pool_bytes(const void *in, int nbytes)
+static void process_random_ready_list(void)
{
unsigned long flags;
- trace_mix_pool_bytes(nbytes, _RET_IP_);
- spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(in, nbytes);
- spin_unlock_irqrestore(&input_pool.lock, flags);
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}
-struct fast_pool {
- u32 pool[4];
- unsigned long last;
- u16 reg_idx;
- u8 count;
-};
+#define warn_unseeded_randomness(previous) \
+ _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
- */
-static void fast_mix(struct fast_pool *f)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
- u32 a = f->pool[0], b = f->pool[1];
- u32 c = f->pool[2], d = f->pool[3];
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ const bool print_once = false;
+#else
+ static bool print_once __read_mostly;
+#endif
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+ if (print_once || crng_ready() ||
+ (previous && (caller == READ_ONCE(*previous))))
+ return;
+ WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ print_once = true;
+#endif
+ if (__ratelimit(&unseeded_warning))
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
+}
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+/*********************************************************************
+ *
+ * Fast key erasure RNG, the "crng".
+ *
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t nbytes)
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The integer family of functions may be
+ * higher performance for one-off random integers, because they do a
+ * bit of buffering.
+ *
+ *********************************************************************/
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
+enum {
+ CRNG_RESEED_INTERVAL = 300 * HZ,
+ CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
+};
- f->pool[0] = a; f->pool[1] = b;
- f->pool[2] = c; f->pool[3] = d;
- f->count++;
-}
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long birth;
+ unsigned long generation;
+ spinlock_t lock;
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
+};
-static void process_random_ready_list(void)
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
+};
+
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+};
+
+/* Used by crng_reseed() to extract a new seed from the input pool. */
+static bool drain_entropy(void *buf, size_t nbytes, bool force);
+
+/*
+ * This extracts a new crng key from the input pool, but only if there is a
+ * sufficient amount of entropy available or force is true, in order to
+ * mitigate bruteforcing of newly added bits.
+ */
+static void crng_reseed(bool force)
{
unsigned long flags;
- struct random_ready_callback *rdy, *tmp;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
+ bool finalize_init = false;
- spin_lock_irqsave(&random_ready_list_lock, flags);
- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
- struct module *owner = rdy->owner;
+ /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
+ if (!drain_entropy(key, sizeof(key), force))
+ return;
- list_del_init(&rdy->list);
- rdy->func(rdy);
- module_put(owner);
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+ if (!crng_ready()) {
+ crng_init = 2;
+ finalize_init = true;
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+ if (finalize_init) {
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
*/
-static void credit_entropy_bits(int nbits)
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- int entropy_count, entropy_bits, orig;
- int nfrac = nbits << POOL_ENTROPY_SHIFT;
-
- /* Ensure that the multiplication can avoid being 64 bits wide. */
- BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
+ u8 first_block[CHACHA_BLOCK_SIZE];
- if (!nbits)
- return;
+ BUG_ON(random_data_len > 32);
-retry:
- entropy_count = orig = READ_ONCE(input_pool.entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
- unsigned int add =
- ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
- }
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: count %d\n", entropy_count);
- entropy_count = 0;
- } else if (entropy_count > POOL_FRACBITS)
- entropy_count = POOL_FRACBITS;
- if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
+}
- entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, true);
+/*
+ * Return whether the crng seed is considered to be sufficiently
+ * old that a reseeding might be attempted. This happens if the last
+ * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
+ * an interval proportional to the uptime.
+ */
+static bool crng_has_old_seed(void)
+{
+ static bool early_boot = true;
+ unsigned long interval = CRNG_RESEED_INTERVAL;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ interval = max_t(unsigned int, 5 * HZ,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
}
-static int credit_entropy_bits_safe(int nbits)
+/*
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns up to 32 bytes on its own of random data
+ * that may be used; random_data_len may not be greater than 32.
+ */
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- if (nbits < 0)
- return -EINVAL;
+ unsigned long flags;
+ struct crng *crng;
- /* Cap the value to avoid overflows */
- nbits = min(nbits, POOL_BITS);
+ BUG_ON(random_data_len > 32);
- credit_entropy_bits(nbits);
- return 0;
-}
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, because
+ * this is what crng_pre_init_inject() mutates during early init.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready)
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
-/*********************************************************************
- *
- * CRNG using CHACHA20
- *
- *********************************************************************/
+ /*
+ * If the base_crng is old enough, we try to reseed, which in turn
+ * bumps the generation counter that we check below.
+ */
+ if (unlikely(crng_has_old_seed()))
+ crng_reseed(false);
-#define CRNG_RESEED_INTERVAL (300 * HZ)
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
+ }
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
+}
/*
- * Hack to deal with crazy userspace progams when they are all trying
- * to access /dev/urandom in parallel. The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
+ * This function is for crng_init == 0 only. It loads entropy directly
+ * into the crng's key, without going through the input pool. It is,
+ * generally speaking, not very safe, but we use this only at early
+ * boot time when it's better to have something there rather than
+ * nothing.
+ *
+ * If account is set, then the crng_init_cnt counter is incremented.
+ * This shouldn't be set by functions like add_device_randomness(),
+ * where we can't trust that the buffer passed to it is guaranteed to be
+ * unpredictable (so it might not have any entropy at all).
+ *
+ * Returns the number of bytes processed from input, which is bounded
+ * by CRNG_INIT_CNT_THRESH if account is true.
*/
-static struct crng_state **crng_node_pool __read_mostly;
+static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+{
+ static int crng_init_cnt = 0;
+ struct blake2s_state hash;
+ unsigned long flags;
-static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
+ blake2s_init(&hash, sizeof(base_crng.key));
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static int __init parse_trust_cpu(char *arg)
-{
- return kstrtobool(arg, &trust_cpu);
-}
-early_param("random.trust_cpu", parse_trust_cpu);
+ spin_lock_irqsave(&base_crng.lock, flags);
+ if (crng_init != 0) {
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ return 0;
+ }
-static bool crng_init_try_arch(struct crng_state *crng)
-{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ if (account)
+ len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
+ blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
+ blake2s_update(&hash, input, len);
+ blake2s_final(&hash, base_crng.key);
+
+ if (account) {
+ crng_init_cnt += len;
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ ++base_crng.generation;
+ crng_init = 1;
}
- crng->state[i] ^= rv;
}
- return arch_init;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+
+ if (crng_init == 1)
+ pr_notice("fast init done\n");
+
+ return len;
}
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static void _get_random_bytes(void *buf, size_t nbytes)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t len;
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long_early(&rv) &&
- !arch_get_random_long_early(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
+ if (!nbytes)
+ return;
+
+ len = min_t(size_t, 32, nbytes);
+ crng_make_state(chacha_state, buf, len);
+ nbytes -= len;
+ buf += len;
+
+ while (nbytes) {
+ if (nbytes < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, nbytes);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
}
- crng->state[i] ^= rv;
+
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ nbytes -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
}
- return arch_init;
+ memzero_explicit(chacha_state, sizeof(chacha_state));
}
-static void crng_initialize_secondary(struct crng_state *crng)
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random bytes, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not rely on the hardware random
+ * number generator. For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+void get_random_bytes(void *buf, size_t nbytes)
{
- chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
- crng_init_try_arch(crng);
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
+ static void *previous;
-static void __init crng_initialize_primary(struct crng_state *crng)
-{
- _extract_entropy(&crng->state[4], sizeof(u32) * 12);
- if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- pr_notice("crng init done (trusting CPU's manufacturer)\n");
- }
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ warn_unseeded_randomness(&previous);
+ _get_random_bytes(buf, nbytes);
}
+EXPORT_SYMBOL(get_random_bytes);
-static void crng_finalize_init(struct crng_state *crng)
+static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- if (crng != &primary_crng || crng_init >= 2)
- return;
- if (!system_wq) {
- /* We can't call numa_crng_init until we have workqueues,
- * so mark this for processing later. */
- crng_need_final_init = true;
- return;
- }
+ bool large_request = nbytes > 256;
+ ssize_t ret = 0;
+ size_t len;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 output[CHACHA_BLOCK_SIZE];
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
-}
+ if (!nbytes)
+ return 0;
-static void do_numa_crng_init(struct work_struct *work)
-{
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize_secondary(crng);
- pool[i] = crng;
- }
- /* pairs with READ_ONCE() in select_crng() */
- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
- for_each_node(i)
- kfree(pool[i]);
- kfree(pool);
- }
-}
+ len = min_t(size_t, 32, nbytes);
+ crng_make_state(chacha_state, output, len);
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+ if (copy_to_user(buf, output, len))
+ return -EFAULT;
+ nbytes -= len;
+ buf += len;
+ ret += len;
-static void numa_crng_init(void)
-{
- if (IS_ENABLED(CONFIG_NUMA))
- schedule_work(&numa_crng_init_work);
-}
+ while (nbytes) {
+ if (large_request && need_resched()) {
+ if (signal_pending(current))
+ break;
+ schedule();
+ }
-static struct crng_state *select_crng(void)
-{
- if (IS_ENABLED(CONFIG_NUMA)) {
- struct crng_state **pool;
- int nid = numa_node_id();
-
- /* pairs with cmpxchg_release() in do_numa_crng_init() */
- pool = READ_ONCE(crng_node_pool);
- if (pool && pool[nid])
- return pool[nid];
+ chacha20_block(chacha_state, output);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+
+ len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
+ if (copy_to_user(buf, output, len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= len;
+ buf += len;
+ ret += len;
}
- return &primary_crng;
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(output, sizeof(output));
+ return ret;
}
/*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally. Returns the number of
- * bytes processed from cp.
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as /dev/urandom. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
*/
-static size_t crng_fast_load(const u8 *cp, size_t len)
+struct batched_entropy {
+ union {
+ /*
+ * We make this 1.5x a ChaCha block, so that we get the
+ * remaining 32 bytes from fast key erasure, plus one full
+ * block from the detached ChaCha state. We can increase
+ * the size of this later if needed so long as we keep the
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
+ */
+ u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
+ u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
+ };
+ local_lock_t lock;
+ unsigned long generation;
+ unsigned int position;
+};
+
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
+ .position = UINT_MAX
+};
+
+u64 get_random_u64(void)
{
+ u64 ret;
unsigned long flags;
- u8 *p;
- size_t ret = 0;
+ struct batched_entropy *batch;
+ static void *previous;
+ unsigned long next_gen;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- p = (u8 *)&primary_crng.state[4];
- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--; ret++;
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
- crng_init = 1;
- pr_notice("fast init done\n");
+ warn_unseeded_randomness(&previous);
+
+ local_lock_irqsave(&batched_entropy_u64.lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+
+ next_gen = READ_ONCE(base_crng.generation);
+ if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
+ next_gen != batch->generation) {
+ _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
+ batch->position = 0;
+ batch->generation = next_gen;
}
+
+ ret = batch->entropy_u64[batch->position];
+ batch->entropy_u64[batch->position] = 0;
+ ++batch->position;
+ local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
return ret;
}
+EXPORT_SYMBOL(get_random_u64);
-/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
- *
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since buffer we may get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
- */
-static int crng_slow_load(const u8 *cp, size_t len)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
+ .position = UINT_MAX
+};
+
+u32 get_random_u32(void)
{
+ u32 ret;
unsigned long flags;
- static u8 lfsr = 1;
- u8 tmp;
- unsigned int i, max = CHACHA_KEY_SIZE;
- const u8 *src_buf = cp;
- u8 *dest_buf = (u8 *)&primary_crng.state[4];
+ struct batched_entropy *batch;
+ static void *previous;
+ unsigned long next_gen;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- if (len > max)
- max = len;
-
- for (i = 0; i < max; i++) {
- tmp = lfsr;
- lfsr >>= 1;
- if (tmp & 1)
- lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA_KEY_SIZE];
- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
- lfsr += (tmp << 3) | (tmp >> 5);
+ warn_unseeded_randomness(&previous);
+
+ local_lock_irqsave(&batched_entropy_u32.lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+
+ next_gen = READ_ONCE(base_crng.generation);
+ if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
+ next_gen != batch->generation) {
+ _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
+ batch->position = 0;
+ batch->generation = next_gen;
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 1;
+
+ ret = batch->entropy_u32[batch->position];
+ batch->entropy_u32[batch->position] = 0;
+ ++batch->position;
+ local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
+ return ret;
}
+EXPORT_SYMBOL(get_random_u32);
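A small usage sketch for the batched integer helpers (hypothetical jittered_timeout()
helper, not from this patch); the point is that one-off integers go through the per-cpu
batch rather than a full get_random_bytes() call:

#include <linux/jiffies.h>
#include <linux/random.h>

/* Hypothetical: add up to one second of random jitter to a timeout. */
static unsigned long jittered_timeout(unsigned long base)
{
	return base + (get_random_u32() % HZ);
}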
-static void crng_reseed(struct crng_state *crng, bool use_input_pool)
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
{
- unsigned long flags;
- int i, num;
- union {
- u8 block[CHACHA_BLOCK_SIZE];
- u32 key[8];
- } buf;
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
+#endif
- if (use_input_pool) {
- num = extract_entropy(&buf, 32, 16);
- if (num == 0)
- return;
- } else {
- _extract_crng(&primary_crng, buf.block);
- _crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA_KEY_SIZE);
- }
- spin_lock_irqsave(&crng->lock, flags);
- for (i = 0; i < 8; i++) {
- unsigned long rv;
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- crng->state[i + 4] ^= buf.key[i] ^ rv;
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
}
- memzero_explicit(&buf, sizeof(buf));
- WRITE_ONCE(crng->init_time, jiffies);
- spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
+
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
}
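A brief, hypothetical usage sketch (not part of this patch) of randomize_page();
SZ_16M comes from <linux/sizes.h>:

#include <linux/random.h>
#include <linux/sizes.h>

/* Hypothetical: pick a page-aligned base in [0x10000000, 0x10000000 + 16M). */
static unsigned long pick_mmap_base(void)
{
	return randomize_page(0x10000000UL, SZ_16M);
}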
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
- unsigned long flags, init_time;
+ size_t left = nbytes;
+ u8 *p = buf;
- if (crng_ready()) {
- init_time = READ_ONCE(crng->init_time);
- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(crng, crng == &primary_crng);
+ while (left) {
+ unsigned long v;
+ size_t chunk = min_t(size_t, left, sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, chunk);
+ p += chunk;
+ left -= chunk;
}
- spin_lock_irqsave(&crng->lock, flags);
- chacha20_block(&crng->state[0], out);
- if (crng->state[12] == 0)
- crng->state[13]++;
- spin_unlock_irqrestore(&crng->lock, flags);
+
+ return nbytes - left;
}
+EXPORT_SYMBOL(get_random_bytes_arch);
-static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
+
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *in, size_t nbytes)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_entropy_bits(size_t nbits)
+ *
+ * Finally, extract entropy via these two, with the latter one
+ * setting the entropy count to zero and extracting only if there
+ * is POOL_MIN_BITS entropy credited prior or force is true:
+ *
+ * static void extract_entropy(void *buf, size_t nbytes)
+ * static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ *
+ **********************************************************************/
+
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
+};
+
+/* For notifying userspace that it should write into /dev/random. */
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int entropy_count;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
+
+static void _mix_pool_bytes(const void *in, size_t nbytes)
{
- _extract_crng(select_crng(), out);
+ blake2s_update(&input_pool.hash, in, nbytes);
}
/*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+ * credit_entropy_bits if this is appropriate.
*/
-static void _crng_backtrack_protect(struct crng_state *crng,
- u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void mix_pool_bytes(const void *in, size_t nbytes)
{
unsigned long flags;
- u32 *s, *d;
- int i;
- used = round_up(used, sizeof(u32));
- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
- extract_crng(tmp);
- used = 0;
- }
- spin_lock_irqsave(&crng->lock, flags);
- s = (u32 *)&tmp[used];
- d = &crng->state[4];
- for (i = 0; i < 8; i++)
- *d++ ^= *s++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(in, nbytes);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
-static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void credit_entropy_bits(size_t nbits)
{
- _crng_backtrack_protect(select_crng(), tmp, used);
+ unsigned int entropy_count, orig, add;
+
+ if (!nbits)
+ return;
+
+ add = min_t(size_t, nbits, POOL_BITS);
+
+ do {
+ orig = READ_ONCE(input_pool.entropy_count);
+ entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
+
+ if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
+ crng_reseed(false);
}
-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t nbytes)
{
- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
- int large_request = (nbytes > 256);
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
+ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
+ !arch_get_random_long(&block.rdseed[i]))
+ block.rdseed[i] = random_get_entropy();
+ }
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- }
+ spin_lock_irqsave(&input_pool.lock, flags);
- extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
+
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+ while (nbytes) {
+ i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
nbytes -= i;
buf += i;
- ret += i;
}
- crng_backtrack_protect(tmp, i);
- /* Wipe data just written to memory */
- memzero_explicit(tmp, sizeof(tmp));
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
+}
- return ret;
+/*
+ * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
+ * is true, and then we set the entropy count to zero (but don't actually touch
+ * any data). Only then can we extract a new key with extract_entropy().
+ */
+static bool drain_entropy(void *buf, size_t nbytes, bool force)
+{
+ unsigned int entropy_count;
+ do {
+ entropy_count = READ_ONCE(input_pool.entropy_count);
+ if (!force && entropy_count < POOL_MIN_BITS)
+ return false;
+ } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+ extract_entropy(buf, nbytes);
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ return true;
}
-/*********************************************************************
+
+/**********************************************************************
*
- * Entropy input management
+ * Entropy collection routines.
*
- *********************************************************************/
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ * void add_hwgenerator_randomness(const void *buffer, size_t count,
+ * size_t entropy);
+ * void add_bootloader_randomness(const void *buf, size_t size);
+ * void add_vmfork_randomness(const void *unique_vm_id, size_t size);
+ * void add_interrupt_randomness(int irq);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The above two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+ * add_device_randomness(), depending on whether or not the configuration
+ * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+ * representing the current instance of a VM to the pool, without crediting,
+ * and then force-reseeds the crng so that it takes effect immediately.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 64
+ * interrupts, crediting 1 bit of entropy for whichever comes first.
+ *
+ **********************************************************************/
-/* There is one of these per entropy source */
-struct timer_rand_state {
- cycles_t last_time;
- long last_delta, last_delta2;
-};
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+ return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+/*
+ * The first collection of entropy occurs at system boot while interrupts
+ * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
+ * Depending on the above configuration knob, RDSEED may be considered
+ * sufficient for initialization. Note that much earlier setup may already
+ * have pushed entropy into the input pool by the time we get here.
+ */
+int __init rand_initialize(void)
+{
+ size_t i;
+ ktime_t now = ktime_get_real();
+ bool arch_init = true;
+ unsigned long rv;
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
+ if (!arch_get_random_seed_long_early(&rv) &&
+ !arch_get_random_long_early(&rv)) {
+ rv = random_get_entropy();
+ arch_init = false;
+ }
+ _mix_pool_bytes(&rv, sizeof(rv));
+ }
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ ++base_crng.generation;
+
+ if (arch_init && trust_cpu && !crng_ready()) {
+ crng_init = 2;
+ pr_notice("crng init done (trusting CPU's manufacturer)\n");
+ }
+
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
+ }
+ return 0;
+}
/*
* Add device- or boot-specific data to the input pool to help
@@ -1091,23 +1002,27 @@ struct timer_rand_state {
* the entropy pool having similar initial state across largely
* identical devices.
*/
-void add_device_randomness(const void *buf, unsigned int size)
+void add_device_randomness(const void *buf, size_t size)
{
- unsigned long time = random_get_entropy() ^ jiffies;
- unsigned long flags;
+ cycles_t cycles = random_get_entropy();
+ unsigned long flags, now = jiffies;
- if (!crng_ready() && size)
- crng_slow_load(buf, size);
+ if (crng_init == 0 && size)
+ crng_pre_init_inject(buf, size, false);
- trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&cycles, sizeof(cycles));
+ _mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(buf, size);
- _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
/*
* This function adds entropy to the entropy "pool" by using timing
@@ -1117,29 +1032,26 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
* The number "num" is also added to the pool - it should somehow describe
* the type of event which just happened. This is currently 0-255 for
* keyboard scan codes, and 256 upwards for interrupts.
- *
*/
-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- struct {
- long jiffies;
- unsigned int cycles;
- unsigned int num;
- } sample;
+ cycles_t cycles = random_get_entropy();
+ unsigned long flags, now = jiffies;
long delta, delta2, delta3;
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- mix_pool_bytes(&sample, sizeof(sample));
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&cycles, sizeof(cycles));
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - READ_ONCE(state->last_time);
- WRITE_ONCE(state->last_time, sample.jiffies);
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
delta2 = delta - READ_ONCE(state->last_delta);
WRITE_ONCE(state->last_delta, delta);
@@ -1163,318 +1075,303 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
+ credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value)
{
static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
- /* ignore autorepeat and the like */
+ /* Ignore autorepeat and the like. */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-
-#ifdef ADD_INTERRUPT_BENCH
-static unsigned long avg_cycles, avg_deviation;
-
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
-
-static void add_interrupt_bench(cycles_t start)
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ if (!disk || !disk->random)
+ return;
+ /* First major is 1, so we get >= 0x200 here. */
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
-#else
-#define add_interrupt_bench(x)
-#endif
+EXPORT_SYMBOL_GPL(add_disk_randomness);
-static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+void rand_initialize_disk(struct gendisk *disk)
{
- u32 *ptr = (u32 *)regs;
- unsigned int idx;
+ struct timer_rand_state *state;
- if (regs == NULL)
- return 0;
- idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(u32))
- idx = 0;
- ptr += idx++;
- WRITE_ONCE(f->reg_idx, idx);
- return *ptr;
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
+ }
}
+#endif
-void add_interrupt_randomness(int irq)
+/*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const void *buffer, size_t count,
+ size_t entropy)
{
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- u32 c_high, j_high;
- u64 ip;
-
- if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^=
- (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-
if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
- return;
+ size_t ret = crng_pre_init_inject(buffer, count, true);
+ mix_pool_bytes(buffer, ret);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
- if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
- return;
+ /*
+ * Throttle writing if we're above the trickle threshold.
+ * We'll be woken up again once below POOL_MIN_BITS, when
+ * the calling thread is about to terminate, or once
+ * CRNG_RESEED_INTERVAL has elapsed.
+ */
+ wait_event_interruptible_timeout(random_write_wait,
+ !system_wq || kthread_should_stop() ||
+ input_pool.entropy_count < POOL_MIN_BITS,
+ CRNG_RESEED_INTERVAL);
+ mix_pool_bytes(buffer, count);
+ credit_entropy_bits(entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
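For orientation, a hedged sketch of a hardware-RNG feeder thread (my_hwrng_read() is a
hypothetical device read; in mainline, a loop of roughly this shape lives in the
hw_random core rather than in individual drivers):

#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/types.h>

extern size_t my_hwrng_read(u8 *buf, size_t len);	/* hypothetical device read */

static int my_hwrng_fill(void *unused)
{
	u8 buf[32];

	while (!kthread_should_stop()) {
		size_t n = my_hwrng_read(buf, sizeof(buf));

		/* Credit 8 bits per byte; blocks while the pool is already full. */
		add_hwgenerator_randomness(buf, n, n * 8);
	}
	return 0;
}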
- if (!spin_trylock(&input_pool.lock))
- return;
+/*
+ * Handle random seed passed by bootloader.
+ * If the seed is trustworthy, it is treated like input from a hardware RNG.
+ * Otherwise it is treated as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, size_t size)
+{
+ if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+ add_hwgenerator_randomness(buf, size, size * 8);
+ else
+ add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
- fast_pool->last = now;
- __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&input_pool.lock);
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
- fast_pool->count = 0;
+/*
+ * Handle a new unique VM ID, which is unique but not secret, so we
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
+ */
+void add_vmfork_randomness(const void *unique_vm_id, size_t size)
+{
+ add_device_randomness(unique_vm_id, size);
+ if (crng_ready()) {
+ crng_reseed(true);
+ pr_notice("crng reseeded due to virtual machine fork\n");
+ }
+ blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
+}
+#if IS_MODULE(CONFIG_VMGENID)
+EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+#endif
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(1);
+int register_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vmfork_chain, nb);
}
-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
-#ifdef CONFIG_BLOCK
-void add_disk_randomness(struct gendisk *disk)
+int unregister_random_vmfork_notifier(struct notifier_block *nb)
{
- if (!disk || !disk->random)
- return;
- /* first major is 1, so we get >= 0x200 here */
- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
+ return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
-EXPORT_SYMBOL_GPL(add_disk_randomness);
+EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
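A hedged consumer sketch for the vmfork notifier above (my_vmfork_event() is hypothetical):
subsystems caching nonces or session identifiers can invalidate them when the VM is cloned.

#include <linux/notifier.h>
#include <linux/random.h>

static int my_vmfork_event(struct notifier_block *nb, unsigned long action, void *data)
{
	/* The VM was cloned: throw away cached nonces, regenerate session IDs. */
	return NOTIFY_DONE;
}

static struct notifier_block my_vmfork_nb = { .notifier_call = my_vmfork_event };

/* Somewhere in setup:  register_random_vmfork_notifier(&my_vmfork_nb);  */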
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
+struct fast_pool {
+ struct work_struct mix;
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+ u16 reg_idx;
+};
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+ /* SipHash constants */
+ .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
+ 0x6c7967656e657261UL, 0x7465646279746573UL }
+#else
+ /* HalfSipHash constants */
+ .pool = { 0, 0, 0x6c796765U, 0x74656462U }
+#endif
+};
/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * 128 or 256-bit SipHash state, while v represents a 128-bit input.
*/
-static size_t account(size_t nbytes, int min)
+static void fast_mix(unsigned long s[4], const unsigned long *v)
{
- int entropy_count, orig;
- size_t ibytes, nfrac;
-
- BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(input_pool.entropy_count);
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: count %d\n", entropy_count);
- entropy_count = 0;
- }
-
- /* never pull more than available */
- ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
- if (ibytes < min)
- ibytes = 0;
- nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
- if ((size_t)entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
+ size_t i;
- if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(8 * ibytes);
- if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
+ for (i = 0; i < 16 / sizeof(long); ++i) {
+ s[3] ^= v[i];
+#ifdef CONFIG_64BIT
+ s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
+ s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
+ s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
+ s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
+#else
+ s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
+ s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
+ s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
+ s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
+#endif
+ s[0] ^= v[i];
}
-
- return ibytes;
}
+#ifdef CONFIG_SMP
/*
- * This function does the actual extraction for extract_entropy.
- *
- * Note: we assume that .poolwords is a multiple of 16 words.
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
-static void extract_buf(u8 *out)
+int random_online_cpu(unsigned int cpu)
{
- struct blake2s_state state __aligned(__alignof__(unsigned long));
- u8 hash[BLAKE2S_HASH_SIZE];
- unsigned long *salt;
- unsigned long flags;
-
- blake2s_init(&state, sizeof(hash));
-
/*
- * If we have an architectural hardware random number
- * generator, use it for BLAKE2's salt & personal fields.
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
*/
- for (salt = (unsigned long *)&state.h[4];
- salt < (unsigned long *)&state.h[8]; ++salt) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- *salt ^= v;
- }
-
- /* Generate a hash across the pool */
- spin_lock_irqsave(&input_pool.lock, flags);
- blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
- blake2s_final(&state, hash); /* final zeros out state */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
- /*
- * We mix the hash back into the pool to prevent backtracking
- * attacks (where the attacker knows the state of the pool
- * plus the current outputs, and attempts to find previous
- * outputs), unless the hash function can be inverted. By
- * mixing at least a hash worth of hash data back, we make
- * brute-forcing the feedback as hard as brute-forcing the
- * hash.
- */
- __mix_pool_bytes(hash, sizeof(hash));
- spin_unlock_irqrestore(&input_pool.lock, flags);
+static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+ unsigned long *ptr = (unsigned long *)regs;
+ unsigned int idx;
- /* Note that EXTRACT_SIZE is half of hash size here, because above
- * we've dumped the full length back into mixer. By reducing the
- * amount that we emit, we retain a level of forward secrecy.
- */
- memcpy(out, hash, EXTRACT_SIZE);
- memzero_explicit(hash, sizeof(hash));
+ if (regs == NULL)
+ return 0;
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
-static ssize_t _extract_entropy(void *buf, size_t nbytes)
+static void mix_interrupt_randomness(struct work_struct *work)
{
- ssize_t ret = 0, i;
- u8 tmp[EXTRACT_SIZE];
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+ * The size of the copied stack pool is explicitly 16 bytes so that we
+ * tax mix_pool_bytes()'s compression function the same amount on all
+ * platforms. This means on 64-bit we copy half the pool into this,
+ * while on 32-bit we copy all of it. The entropy is supposed to be
+ * sufficiently dispersed between bits that in the sponge-like
+ * half case, on average we don't wind up "losing" some.
+ */
+ u8 pool[16];
- while (nbytes) {
- extract_buf(tmp);
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
}
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
- return ret;
-}
+ if (unlikely(crng_init == 0)) {
+ crng_pre_init_inject(pool, sizeof(pool), true);
+ mix_pool_bytes(pool, sizeof(pool));
+ } else {
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_entropy_bits(1);
+ }
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding.
- */
-static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
-{
- trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
- nbytes = account(nbytes, min);
- return _extract_entropy(buf, nbytes);
+ memzero_explicit(pool, sizeof(pool));
}
-#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
+void add_interrupt_randomness(int irq)
{
-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- const bool print_once = false;
-#else
- static bool print_once __read_mostly;
-#endif
-
- if (print_once || crng_ready() ||
- (previous && (caller == READ_ONCE(*previous))))
- return;
- WRITE_ONCE(*previous, caller);
-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- print_once = true;
-#endif
- if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
-}
+ enum { MIX_INFLIGHT = 1U << 31 };
+ cycles_t cycles = random_get_entropy();
+ unsigned long now = jiffies;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
+ union {
+ u32 u32[4];
+ u64 u64[2];
+ unsigned long longs[16 / sizeof(long)];
+ } irq_data;
-/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not rely on the hardware random
- * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch(). In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
- */
-static void _get_random_bytes(void *buf, int nbytes)
-{
- u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ if (cycles == 0)
+ cycles = get_reg(fast_pool, regs);
- trace_get_random_bytes(nbytes, _RET_IP_);
+ if (sizeof(cycles) == 8)
+ irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
+ else {
+ irq_data.u32[0] = cycles ^ irq;
+ irq_data.u32[1] = now;
+ }
- while (nbytes >= CHACHA_BLOCK_SIZE) {
- extract_crng(buf);
- buf += CHACHA_BLOCK_SIZE;
- nbytes -= CHACHA_BLOCK_SIZE;
+ if (sizeof(unsigned long) == 8)
+ irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+ else {
+ irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
+ irq_data.u32[3] = get_reg(fast_pool, regs);
}
- if (nbytes > 0) {
- extract_crng(tmp);
- memcpy(buf, tmp, nbytes);
- crng_backtrack_protect(tmp, nbytes);
- } else
- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
- memzero_explicit(tmp, sizeof(tmp));
-}
+ fast_mix(fast_pool->pool, irq_data.longs);
+ new_count = ++fast_pool->count;
-void get_random_bytes(void *buf, int nbytes)
-{
- static void *previous;
+ if (new_count & MIX_INFLIGHT)
+ return;
- warn_unseeded_randomness(&previous);
- _get_random_bytes(buf, nbytes);
+ if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
+ unlikely(crng_init == 0)))
+ return;
+
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+ fast_pool->count |= MIX_INFLIGHT;
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
/*
* Each time the timer fires, we expect that we got an unpredictable
@@ -1501,238 +1398,134 @@ static void entropy_timer(struct timer_list *t)
static void try_to_generate_entropy(void)
{
struct {
- unsigned long now;
+ cycles_t cycles;
struct timer_list timer;
} stack;
- stack.now = random_get_entropy();
+ stack.cycles = random_get_entropy();
/* Slow counter - or none. Don't even bother */
- if (stack.now == random_get_entropy())
+ if (stack.cycles == random_get_entropy())
return;
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
- while (!crng_ready()) {
+ while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
mod_timer(&stack.timer, jiffies + 1);
- mix_pool_bytes(&stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
schedule();
- stack.now = random_get_entropy();
+ stack.cycles = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
}
-/*
- * Wait for the urandom pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
- *
- * Returns: 0 if the urandom pool has been seeded.
- * -ERESTARTSYS if the function was interrupted by a signal.
- */
-int wait_for_random_bytes(void)
-{
- if (likely(crng_ready()))
- return 0;
-
- do {
- int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
- if (ret)
- return ret > 0 ? 0 : ret;
-
- try_to_generate_entropy();
- } while (!crng_ready());
- return 0;
-}
-EXPORT_SYMBOL(wait_for_random_bytes);
-
-/*
- * Returns whether or not the urandom pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+/**********************************************************************
*
- * Returns: true if the urandom pool has been seeded.
- * false if the urandom pool has not been seeded.
- */
-bool rng_is_initialized(void)
-{
- return crng_ready();
-}
-EXPORT_SYMBOL(rng_is_initialized);
-
-/*
- * Add a callback function that will be invoked when the nonblocking
- * pool is initialised.
+ * Userspace reader/writer interfaces.
*
- * returns: 0 if callback is successfully added
- * -EALREADY if pool is already initialised (callback not called)
- * -ENOENT if module for callback is not alive
- */
-int add_random_ready_callback(struct random_ready_callback *rdy)
-{
- struct module *owner;
- unsigned long flags;
- int err = -EALREADY;
-
- if (crng_ready())
- return err;
-
- owner = rdy->owner;
- if (!try_module_get(owner))
- return -ENOENT;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (crng_ready())
- goto out;
-
- owner = NULL;
-
- list_add(&rdy->list, &random_ready_list);
- err = 0;
-
-out:
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-
- return err;
-}
-EXPORT_SYMBOL(add_random_ready_callback);
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
+ *
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier kernel versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
+ *
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
+ *
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
-/*
- * Delete a previously registered readiness callback function.
- */
-void del_random_ready_callback(struct random_ready_callback *rdy)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ flags)
{
- unsigned long flags;
- struct module *owner = NULL;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (!list_empty(&rdy->list)) {
- list_del_init(&rdy->list);
- owner = rdy->owner;
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-}
-EXPORT_SYMBOL(del_random_ready_callback);
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
-/*
- * This function will use the architecture-specific hardware random
- * number generator if it is available. The arch-specific hw RNG will
- * almost certainly be faster than what we can do in software, but it
- * is impossible to verify that it is implemented securely (as
- * opposed, to, say, the AES encryption of a sequence number using a
- * key known by the NSA). So it's useful if we need the speed, but
- * only if we're willing to trust the hardware manufacturer not to
- * have put in a back door.
- *
- * Return number of bytes filled in.
- */
-int __must_check get_random_bytes_arch(void *buf, int nbytes)
-{
- int left = nbytes;
- u8 *p = buf;
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
- trace_get_random_bytes_arch(left, _RET_IP_);
- while (left) {
- unsigned long v;
- int chunk = min_t(int, left, sizeof(unsigned long));
+ if (count > INT_MAX)
+ count = INT_MAX;
- if (!arch_get_random_long(&v))
- break;
+ if (!(flags & GRND_INSECURE) && !crng_ready()) {
+ int ret;
- memcpy(p, &v, chunk);
- p += chunk;
- left -= chunk;
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
-
- return nbytes - left;
+ return get_random_bytes_user(buf, count);
}
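To make the semantics documented in the comment above concrete, here is a hedged userspace sketch (not from this patch) of calling getrandom(2). It assumes a libc new enough to provide <sys/random.h>; the helper name is made up.

#include <errno.h>
#include <sys/random.h>

/* Fill key with len cryptographically secure bytes, blocking until the
 * kernel RNG is initialized (flags == 0, same semantics as /dev/random). */
static int fill_key(unsigned char *key, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = getrandom(key + done, len - done, 0);

		if (ret < 0) {
			if (errno == EINTR)
				continue;	/* interrupted before the pool was ready */
			return -1;
		}
		done += ret;
	}
	return 0;
}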
-EXPORT_SYMBOL(get_random_bytes_arch);
-/*
- * init_std_data - initialize pool with system data
- *
- * This function clears the pool's entropy count and mixes some system
- * data into the pool to prepare it for use. The pool is not cleared
- * as that can only decrease the entropy in the pool.
- */
-static void __init init_std_data(void)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
- int i;
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
- mix_pool_bytes(&now, sizeof(now));
- for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- mix_pool_bytes(&rv, sizeof(rv));
- }
- mix_pool_bytes(utsname(), sizeof(*(utsname())));
-}
+ __poll_t mask;
-/*
- * Note that setup_arch() may call add_device_randomness()
- * long before we get here. This allows seeding of the pools
- * with some platform dependent data very early in the boot
- * process. But it limits our options here. We must use
- * statically allocated structures that already have all
- * initializations complete at compile time. We should also
- * take care not to overwrite the precious per platform data
- * we were given.
- */
-int __init rand_initialize(void)
-{
- init_std_data();
- if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
- crng_global_init_time = jiffies;
- if (ratelimit_disable) {
- urandom_warning.interval = 0;
- unseeded_warning.interval = 0;
- }
- return 0;
+ poll_wait(file, &crng_init_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
+ if (crng_ready())
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (input_pool.entropy_count < POOL_MIN_BITS)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ return mask;
}
-#ifdef CONFIG_BLOCK
-void rand_initialize_disk(struct gendisk *disk)
+static int write_pool(const char __user *ubuf, size_t count)
{
- struct timer_rand_state *state;
+ size_t len;
+ int ret = 0;
+ u8 block[BLAKE2S_BLOCK_SIZE];
- /*
- * If kzalloc returns null, we just won't use that entropy
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state) {
- state->last_time = INITIAL_JIFFIES;
- disk->random = state;
+ while (count) {
+ len = min(count, sizeof(block));
+ if (copy_from_user(block, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ count -= len;
+ ubuf += len;
+ mix_pool_bytes(block, len);
+ cond_resched();
}
+
+out:
+ memzero_explicit(block, sizeof(block));
+ return ret;
}
-#endif
-static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
int ret;
- nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
- return ret;
+ ret = write_pool(buffer, count);
+ if (ret)
+ return ret;
+
+ return (ssize_t)count;
}
static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
@@ -1747,7 +1540,7 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
current->comm, nbytes);
}
- return urandom_read_nowarn(file, buf, nbytes, ppos);
+ return get_random_bytes_user(buf, nbytes);
}
static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
@@ -1758,62 +1551,7 @@ static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
-}
-
-static __poll_t random_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask;
-
- poll_wait(file, &crng_init_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
- if (crng_ready())
- mask |= EPOLLIN | EPOLLRDNORM;
- if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
- mask |= EPOLLOUT | EPOLLWRNORM;
- return mask;
-}
-
-static int write_pool(const char __user *buffer, size_t count)
-{
- size_t bytes;
- u32 t, buf[16];
- const char __user *p = buffer;
-
- while (count > 0) {
- int b, i = 0;
-
- bytes = min(count, sizeof(buf));
- if (copy_from_user(&buf, p, bytes))
- return -EFAULT;
-
- for (b = bytes; b > 0; b -= sizeof(u32), i++) {
- if (!arch_get_random_int(&t))
- break;
- buf[i] ^= t;
- }
-
- count -= bytes;
- p += bytes;
-
- mix_pool_bytes(buf, bytes);
- cond_resched();
- }
-
- return 0;
-}
-
-static ssize_t random_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- size_t ret;
-
- ret = write_pool(buffer, count);
- if (ret)
- return ret;
-
- return (ssize_t)count;
+ return get_random_bytes_user(buf, nbytes);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -1824,9 +1562,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
- /* inherently racy, no point locking */
- ent_count = POOL_ENTROPY_BITS();
- if (put_user(ent_count, p))
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1834,7 +1571,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(ent_count);
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_entropy_bits(ent_count);
+ return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1847,7 +1587,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
retval = write_pool((const char __user *)p, size);
if (retval < 0)
return retval;
- return credit_entropy_bits_safe(ent_count);
+ credit_entropy_bits(ent_count);
+ return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1856,15 +1597,17 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
+ if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (crng_init < 2)
+ if (!crng_ready())
return -ENODATA;
- crng_reseed(&primary_crng, true);
- WRITE_ONCE(crng_global_init_time, jiffies - 1);
+ crng_reseed(false);
return 0;
default:
return -EINVAL;
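As a hedged userspace sketch of the RNDADDENTROPY path handled above (not part of this patch): write a seed and credit it in a single ioctl. It requires CAP_SYS_ADMIN, uses the uapi struct rand_pool_info layout, and the helper name is made up.

#include <fcntl.h>
#include <linux/random.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mix len bytes into the input pool and credit len * 8 bits of entropy. */
static int credit_seed(const unsigned char *seed, int len)
{
	struct rand_pool_info *info;
	int fd, ret = -1;

	info = malloc(sizeof(*info) + len);
	if (!info)
		return -1;
	info->entropy_count = len * 8;	/* bits of entropy being claimed */
	info->buf_size = len;		/* payload size in bytes */
	memcpy(info->buf, seed, len);

	fd = open("/dev/random", O_WRONLY);
	if (fd >= 0) {
		ret = ioctl(fd, RNDADDENTROPY, info);
		close(fd);
	}
	memset(info, 0, sizeof(*info) + len);	/* scrub the seed copy */
	free(info);
	return ret;
}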
@@ -1895,37 +1638,34 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
- flags)
-{
- int ret;
-
- if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
- return -EINVAL;
-
- /*
- * Requesting insecure and blocking randomness at the same time makes
- * no sense.
- */
- if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
- return -EINVAL;
-
- if (count > INT_MAX)
- count = INT_MAX;
-
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
- if (flags & GRND_NONBLOCK)
- return -EAGAIN;
- ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
- }
- return urandom_read_nowarn(NULL, buf, count, NULL);
-}
/********************************************************************
*
- * Sysctl interface
+ * Sysctl interface.
+ *
+ * Some of these are legacy knobs, kept with dummy values so as not to break
+ * userspace, while others are still useful. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_MIN_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
*
********************************************************************/
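A hedged userspace sketch (not part of this patch) of sampling the knobs listed above through procfs; the helper name is made up.

#include <stdio.h>

/* Return the integer value of /proc/sys/kernel/random/<name>, or -1 on error. */
static long read_random_sysctl(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/random/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

/* e.g. read_random_sysctl("entropy_avail") <= read_random_sysctl("poolsize") */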
@@ -1933,25 +1673,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
#include <linux/sysctl.h>
-static int min_write_thresh;
-static int max_write_thresh = POOL_BITS;
-static int random_min_urandom_seed = 60;
-static char sysctl_bootid[16];
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
+ * UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
- *
- * If the user accesses this via the proc interface, the UUID will be
- * returned as an ASCII string in the standard UUID format; if via the
- * sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
uuid = table->data;
if (!uuid) {
@@ -1966,32 +1709,17 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
spin_unlock(&bootid_spinlock);
}
- sprintf(buf, "%pU", uuid);
-
- fake_table.data = buf;
- fake_table.maxlen = sizeof(buf);
-
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
}
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+ return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
}
-static int sysctl_poolsize = POOL_BITS;
static struct ctl_table random_table[] = {
{
.procname = "poolsize",
@@ -2002,56 +1730,36 @@ static struct ctl_table random_table[] = {
},
{
.procname = "entropy_avail",
+ .data = &input_pool.entropy_count,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
+ .proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_bits,
+ .data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_write_thresh,
- .extra2 = &max_write_thresh,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
+ .data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
-#ifdef ADD_INTERRUPT_BENCH
- {
- .procname = "add_interrupt_avg_cycles",
- .data = &avg_cycles,
- .maxlen = sizeof(avg_cycles),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "add_interrupt_avg_deviation",
- .data = &avg_deviation,
- .maxlen = sizeof(avg_deviation),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
-#endif
{ }
};
@@ -2065,168 +1773,4 @@ static int __init random_sysctls_init(void)
return 0;
}
device_initcall(random_sysctls_init);
-#endif /* CONFIG_SYSCTL */
-
-struct batched_entropy {
- union {
- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
- };
- unsigned int position;
- spinlock_t batch_lock;
-};
-
-/*
- * Get a random word for internal kernel use only. The quality of the random
- * number is good as /dev/urandom, but there is no backtrack protection, with
- * the goal of being quite fast and not depleting entropy. In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once at any
- * point prior.
- */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
- batch->position = 0;
- }
- ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
- batch->position = 0;
- }
- ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
-
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu(cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
-}
-
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
-/* Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
- */
-void add_hwgenerator_randomness(const char *buffer, size_t count,
- size_t entropy)
-{
- if (unlikely(crng_init == 0)) {
- size_t ret = crng_fast_load(buffer, count);
- mix_pool_bytes(buffer, ret);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
- }
-
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait,
- !system_wq || kthread_should_stop() ||
- POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
- mix_pool_bytes(buffer, count);
- credit_entropy_bits(entropy);
-}
-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-
-/* Handle random seed passed by bootloader.
- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
- * it would be regarded as device data.
- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
- */
-void add_bootloader_randomness(const void *buf, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
- add_hwgenerator_randomness(buf, size, size * 8);
- else
- add_device_randomness(buf, size);
-}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+#endif
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 7c617edff4ca..3170d59d660c 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -267,11 +267,8 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
static int st33zp24_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
+ st33zp24_remove(chip);
return 0;
}
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index a75dafd39445..22d184884694 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -381,16 +381,11 @@ static int st33zp24_spi_probe(struct spi_device *dev)
* @param: client, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
*/
-static int st33zp24_spi_remove(struct spi_device *dev)
+static void st33zp24_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
-
- return 0;
+ st33zp24_remove(chip);
}
static const struct spi_device_id st33zp24_spi_id[] = {
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index ce9efb73c144..15b393e92c8e 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -511,10 +511,9 @@ _tpm_clean_answer:
}
EXPORT_SYMBOL(st33zp24_probe);
-int st33zp24_remove(struct tpm_chip *chip)
+void st33zp24_remove(struct tpm_chip *chip)
{
tpm_chip_unregister(chip);
- return 0;
}
EXPORT_SYMBOL(st33zp24_remove);
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index 6747be1e2502..b387a476c555 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -34,5 +34,5 @@ int st33zp24_pm_resume(struct device *dev);
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
struct device *dev, int irq, int io_lpcpd);
-int st33zp24_remove(struct tpm_chip *chip);
+void st33zp24_remove(struct tpm_chip *chip);
#endif /* __LOCAL_ST33ZP24_H__ */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index b009e7479b70..783d65fc71f0 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip);
}
-static void tpm_devs_release(struct device *dev)
-{
- struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
-
- /* release the master device reference */
- put_device(&chip->dev);
-}
-
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
@@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev_num = rc;
device_initialize(&chip->dev);
- device_initialize(&chip->devs);
chip->dev.class = tpm_class;
chip->dev.class->shutdown_pre = tpm_class_shutdown;
@@ -352,39 +343,20 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
- chip->devs.parent = pdev;
- chip->devs.class = tpmrm_class;
- chip->devs.release = tpm_devs_release;
- /* get extra reference on main device to hold on
- * behalf of devs. This holds the chip structure
- * while cdevs is in use. The corresponding put
- * is in the tpm_devs_release (TPM2 only)
- */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- get_device(&chip->dev);
-
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
- chip->devs.devt =
- MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
-
rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
if (rc)
goto out;
- rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
- if (rc)
- goto out;
if (!pdev)
chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
- cdev_init(&chip->cdevs, &tpmrm_fops);
chip->cdev.owner = THIS_MODULE;
- chip->cdevs.owner = THIS_MODULE;
rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
@@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
return chip;
out:
- put_device(&chip->devs);
put_device(&chip->dev);
return ERR_PTR(rc);
}
@@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip)
}
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
- rc = cdev_device_add(&chip->cdevs, &chip->devs);
- if (rc) {
- dev_err(&chip->devs,
- "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- return rc;
- }
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
}
/* Make the chip available. */
@@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip)
idr_replace(&dev_nums_idr, chip, chip->dev_num);
mutex_unlock(&idr_lock);
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
return rc;
}
@@ -654,7 +624,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
- cdev_device_del(&chip->cdevs, &chip->devs);
+ tpm_devs_remove(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c08cbb306636..dc4c0a0a5129 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work)
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
- if (ret > 0) {
+
+ /*
+ * If ret is > 0 then tpm_dev_transmit returned the size of the
+ * response. If ret is < 0 then tpm_dev_transmit failed and
+ * returned an error code.
+ */
+ if (ret != 0) {
priv->response_length = ret;
mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 283f78211c3a..2163c6ee0d36 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
size_t cmdsiz);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 97e916856cf3..ffb35f0154c1 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
- mutex_lock(&chip->tpm_mutex);
- if (!tpm_chip_start(chip)) {
+
+ if (tpm_try_get_ops(chip) == 0) {
tpm2_flush_sessions(chip, space);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
- mutex_unlock(&chip->tpm_mutex);
+
kfree(space->context_buf);
kfree(space->session_buf);
}
@@ -574,3 +574,68 @@ out:
dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
return rc;
}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = tpmrm_class;
+
+ /*
+ * Get extra reference on main device to hold on behalf of devs.
+ * This holds the chip structure while cdevs is in use. The
+ * corresponding put is in the tpm_devs_release.
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index aaa59a00eeae..184396b3af50 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -254,13 +254,12 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi)
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
-static int tpm_tis_spi_remove(struct spi_device *dev)
+static void tpm_tis_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
- return 0;
}
static const struct spi_device_id tpm_tis_spi_id[] = {
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 91c772e38bb5..5c865987ba5c 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -91,7 +91,7 @@ static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
len = proxy_dev->req_len;
- if (count < len) {
+ if (count < len || len > sizeof(proxy_dev->buffer)) {
mutex_unlock(&proxy_dev->buf_lock);
pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
count, len);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index da5b30771418..f53e0cf1ec7e 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -126,16 +126,16 @@ static void vtpm_cancel(struct tpm_chip *chip)
notify_remote_via_evtchn(priv->evtchn);
}
-static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
+static size_t shr_data_offset(struct vtpm_shared_page *shr)
{
- return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
+ return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
u32 ordinal;
unsigned long duration;
@@ -177,7 +177,7 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
size_t length = shr->length;
if (shr->state == VTPM_STATE_IDLE)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 2359889a35a0..e3c430539a17 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev)
list_del(&portdev->list);
spin_unlock_irq(&pdrvdata_lock);
+ /* Device is going away, exit any polling for buffers */
+ virtio_break_device(vdev);
+ if (use_multiport(portdev))
+ flush_work(&portdev->control_work);
+ else
+ flush_work(&portdev->config_work);
+
/* Disable interrupts for vqs */
virtio_reset_device(vdev);
/* Finish up work that's lined up */
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index ad4256d54361..d4d67fbae869 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI
config COMMON_CLK_LAN966X
bool "Generic Clock Controller driver for LAN966X SoC"
+ depends on HAS_IOMEM
+ depends on OF
help
This driver provides support for Generic Clock Controller(GCK) on
LAN966X SoC. GCK generates and supplies clock to various peripherals
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 8f02c0b88000..f416f8bc2898 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -1544,14 +1544,12 @@ err_disable_oscin:
return ret;
}
-static int lmk04832_remove(struct spi_device *spi)
+static void lmk04832_remove(struct spi_device *spi)
{
struct lmk04832 *lmk = spi_get_drvdata(spi);
clk_disable_unprepare(lmk->oscin);
of_clk_del_provider(spi->dev.of_node);
-
- return 0;
}
static const struct spi_device_id lmk04832_id[] = {
{ "lmk04832", LMK04832 },
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 744d136b721b..15d61793f53b 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
},
[JZ4725B_CLK_I2S] = {
- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
.div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
- .gate = { CGU_REG_CLKGR, 6 },
},
[JZ4725B_CLK_SPI] = {
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 538e4963c915..5d2ae297e741 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index 4ef4ae231794..ad596d567f6a 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = {
static struct gdsc disp_cc_mdss_core_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "disp_cc_mdss_core_gdsc",
},
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 566fdfa0a15b..db9379634fb2 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 71aa630fa4bd..f09499999eb3 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
{ .hw = &gpll4.clkr.hw },
};
-static struct clk_rcg2 system_noc_clk_src = {
- .cmd_rcgr = 0x0120,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "system_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 config_noc_clk_src = {
- .cmd_rcgr = 0x0150,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "config_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
- .cmd_rcgr = 0x0190,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "periph_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
@@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_lpass_q6_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2213,8 +2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+
+ /*
+ * The following clocks should NOT be managed by this driver, but they once were
+ * mistakenly added. Now they are only here to indicate that they are not defined
+ * on purpose, even though the names will stay in the header file (for ABI sanity).
+ */
+ [CONFIG_NOC_CLK_SRC] = NULL,
+ [PERIPH_NOC_CLK_SRC] = NULL,
+ [SYSTEM_NOC_CLK_SRC] = NULL,
};
static struct gdsc *gcc_msm8994_gdscs[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 7e1dd8ccfa38..44520efc6c72 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
@@ -35,9 +35,14 @@
#define CFG_GDSCR_OFFSET 0x4
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL (0x2 << 20)
-#define EN_FEW_WAIT_VAL (0x8 << 16)
-#define CLK_DIS_WAIT_VAL (0x2 << 12)
+#define EN_REST_WAIT_VAL 0x2
+#define EN_FEW_WAIT_VAL 0x8
+#define CLK_DIS_WAIT_VAL 0x2
+
+/* Transition delay shifts */
+#define EN_REST_WAIT_SHIFT 20
+#define EN_FEW_WAIT_SHIFT 16
+#define CLK_DIS_WAIT_SHIFT 12
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
@@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc)
*/
mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
- val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+
+ if (!sc->en_rest_wait_val)
+ sc->en_rest_wait_val = EN_REST_WAIT_VAL;
+ if (!sc->en_few_wait_val)
+ sc->en_few_wait_val = EN_FEW_WAIT_VAL;
+ if (!sc->clk_dis_wait_val)
+ sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+
+ val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
+ sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
+ sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index d7cc4c21a9d4..ad313d7210bd 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_GDSC_H__
@@ -22,6 +22,9 @@ struct reset_controller_dev;
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
* @pwrsts: Possible powerdomain power states
+ * @en_rest_wait_val: transition delay value for receiving enr ack signal
+ * @en_few_wait_val: transition delay value for receiving enf ack signal
+ * @clk_dis_wait_val: transition delay value for halting clock
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
@@ -36,6 +39,9 @@ struct gdsc {
unsigned int clamp_io_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
+ unsigned int en_rest_wait_val;
+ unsigned int en_few_wait_val;
+ unsigned int clk_dis_wait_val;
const u8 pwrsts;
/* Powerdomain allowable state bitfields */
#define PWRSTS_OFF BIT(0)
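
A minimal sketch (not part of the patch) of how a platform GDSC could use the new override fields; the GDSC itself and its register offset are hypothetical, while the field names, shifts and defaults come from the hunks above.

static struct gdsc example_gdsc = {
	.gdscr = 0x7d060,		/* hypothetical register offset */
	.pd = {
		.name = "example_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	/* wait 2^8 CXO cycles for clock disable instead of the default 2^2 */
	.clk_dis_wait_val = 0x8,
};

Any field left at zero still falls back to the historical EN_REST_WAIT_VAL/EN_FEW_WAIT_VAL/CLK_DIS_WAIT_VAL defaults in gdsc_init().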
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cfb8ea0df3b1..1ea556e75494 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -713,7 +713,6 @@ config INGENIC_OST
config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
depends on OF || COMPILE_TEST
- select CLKSRC_MMIO
select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index eb596ff9e7bb..279ddff81ab4 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg)
int ret;
ret = kstrtouint(arg, 16, &base);
- if (ret)
- return ret;
+ if (ret) {
+ pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg);
+ return 1;
+ }
pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
base);
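
For context, a sketch of the __setup() convention the fix follows (illustration only, with a hypothetical parameter name): handlers return 1 to mark the option as consumed, even when the value is bad, so the string is not handed on to init.

#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_setup(char *arg)
{
	unsigned int val;

	if (kstrtouint(arg, 0, &val)) {
		pr_warn("example: invalid 'example=' value: '%s'\n", arg);
		return 1;	/* consumed, but ignored */
	}
	/* ... use val ... */
	return 1;
}
__setup("example=", example_setup);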
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 1ecd52f903b8..9ab8221ee3c6 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -880,10 +880,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
-static void arch_timer_evtstrm_enable(int divider)
+static void arch_timer_evtstrm_enable(unsigned int divider)
{
u32 cntkctl = arch_timer_get_cntkctl();
+#ifdef CONFIG_ARM64
+ /* ECV is likely to require a large divider. Use the EVNTIS flag. */
+ if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) {
+ cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
+ divider -= 8;
+ }
+#endif
+
+ divider = min(divider, 15U);
cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
/* Set the divider and enable virtual event stream */
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
@@ -912,7 +921,7 @@ static void arch_timer_configure_evtstream(void)
lsb++;
/* enable event stream */
- arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
+ arch_timer_evtstrm_enable(max(0, lsb));
}
static void arch_counter_set_user_access(void)
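
A standalone illustration (plain C, not kernel code) of the new divider handling, assuming the usual reading of the ECV event-stream scaling, namely that EVNTIS shifts the selected trigger bit up by 8:

#include <stdio.h>

int main(void)
{
	unsigned int divider = 18;	/* e.g. lsb computed for a very fast counter */
	int evntis = 0;

	if (divider > 15) {		/* mirrors the ARM64_HAS_ECV branch above */
		evntis = 1;
		divider -= 8;
	}
	if (divider > 15)		/* the min(divider, 15U) clamp */
		divider = 15;

	printf("EVNTI field = %u, EVNTIS = %d, trigger bit = %u\n",
	       divider, evntis, divider + (evntis ? 8 : 0));
	return 0;
}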
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 6db3d5511b0f..f29c812b70c9 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -60,27 +60,18 @@
#define MCT_CLKEVENTS_RATING 350
#endif
+/* There are four Global timers starting with 0 offset */
+#define MCT_G0_IRQ 0
+/* Local timers count starts after global timer count */
+#define MCT_L0_IRQ 4
+/* Max number of IRQs as per the DT binding document */
+#define MCT_NR_IRQS 20
+
enum {
MCT_INT_SPI,
MCT_INT_PPI
};
-enum {
- MCT_G0_IRQ,
- MCT_G1_IRQ,
- MCT_G2_IRQ,
- MCT_G3_IRQ,
- MCT_L0_IRQ,
- MCT_L1_IRQ,
- MCT_L2_IRQ,
- MCT_L3_IRQ,
- MCT_L4_IRQ,
- MCT_L5_IRQ,
- MCT_L6_IRQ,
- MCT_L7_IRQ,
- MCT_NR_IRQS,
-};
-
static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
@@ -89,7 +80,11 @@ static int mct_irqs[MCT_NR_IRQS];
struct mct_clock_event_device {
struct clock_event_device evt;
unsigned long base;
- char name[10];
+ /**
+ * The length of the name must be adjusted if the number of
+ * local timer interrupts grows over two digits
+ */
+ char name[11];
};
static void exynos4_mct_write(unsigned int value, unsigned long offset)
@@ -541,6 +536,11 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
* irqs are specified.
*/
nr_irqs = of_irq_count(np);
+ if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
+ pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
+ nr_irqs);
+ nr_irqs = ARRAY_SIZE(mct_irqs);
+ }
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
@@ -553,11 +553,14 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
mct_irqs[MCT_L0_IRQ], err);
} else {
for_each_possible_cpu(cpu) {
- int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ int mct_irq;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
pcpu_mevt->evt.irq = -1;
+ if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ break;
+ mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 55a8e198d2a1..523e37662a6e 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -110,7 +110,7 @@ static struct timer_of to_sysctr = {
},
.of_irq = {
.handler = sysctr_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 2cdc077a39f5..bd64a8a8427f 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -32,8 +32,8 @@
#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
-static int counter_width;
-static void __iomem *timer_base;
+static int counter_width __ro_after_init;
+static void __iomem *timer_base __ro_after_init;
static inline void tpm_timer_disable(void)
{
@@ -73,12 +73,12 @@ static unsigned long tpm_read_current_timer(void)
{
return tpm_read_counter();
}
-#endif
static u64 notrace tpm_read_sched_clock(void)
{
return tpm_read_counter();
}
+#endif
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -127,9 +127,9 @@ static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
static struct timer_of to_tpm = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
- .name = "i.MX7ULP TPM Timer",
+ .name = "i.MX TPM Timer",
.rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = tpm_set_state_shutdown,
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
@@ -137,7 +137,7 @@ static struct timer_of to_tpm = {
},
.of_irq = {
.handler = tpm_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
@@ -150,10 +150,10 @@ static int __init tpm_clocksource_init(void)
tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
register_current_timer_delay(&tpm_delay_timer);
-#endif
sched_clock_register(tpm_read_sched_clock, counter_width,
timer_of_rate(&to_tpm) >> 3);
+#endif
return clocksource_mmio_init(timer_base + TPM_CNT,
"imx-tpm",
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index cfa4ec7ef396..abce83d2f00b 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -42,8 +42,7 @@
#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
-#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */
-#define MCHP_PIT64B_DEF_CE_FREQ 32768 /* 32 KHz */
+#define MCHP_PIT64B_DEF_FREQ 5000000UL /* 5 MHz */
#define MCHP_PIT64B_NAME "pit64b"
@@ -165,7 +164,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
-static u64 mchp_pit64b_sched_read_clk(void)
+static u64 notrace mchp_pit64b_sched_read_clk(void)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
@@ -418,7 +417,6 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
bool clkevt)
{
- u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
struct mchp_pit64b_timer timer;
unsigned long clk_rate;
u32 irq = 0;
@@ -446,7 +444,7 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
}
/* Initialize mode (prescaler + SGCK bit). To be used at runtime. */
- ret = mchp_pit64b_init_mode(&timer, freq);
+ ret = mchp_pit64b_init_mode(&timer, MCHP_PIT64B_DEF_FREQ);
if (ret)
goto irq_unmap;
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 529cc6a51cdb..c3f54d9912be 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np,
of_base->base = of_base->name ?
of_io_request_and_map(np, of_base->index, of_base->name) :
of_iomap(np, of_base->index);
- if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", of_base->name);
- return PTR_ERR(of_base->base);
+ if (IS_ERR_OR_NULL(of_base->base)) {
+ pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name);
+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
return 0;
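
The reason for IS_ERR_OR_NULL() above is that the two mapping helpers fail differently; a hypothetical wrapper, sketched here only to make the two conventions explicit:

/* of_io_request_and_map() returns ERR_PTR(-errno) on failure,
 * of_iomap() returns NULL, so normalise both to an ERR_PTR. */
static void __iomem * __init example_timer_map(struct device_node *np,
					       struct of_timer_base *of_base)
{
	void __iomem *base;

	base = of_base->name ?
		of_io_request_and_map(np, of_base->index, of_base->name) :
		of_iomap(np, of_base->index);

	if (!base)
		return ERR_PTR(-ENOMEM);
	return base;		/* may still carry an ERR_PTR from the map */
}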
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index b6f97960d8ee..2737407ff069 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
- if (of_machine_is_compatible("ti,omap3-beagle") ||
- of_machine_is_compatible("timll,omap3-devkit8000")) {
+ if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
}
@@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
return 0;
}
- if (pa == 0x48034000) /* dra7 dmtimer3 */
+ if (pa == 0x4882c000) /* dra7 dmtimer15 */
return dmtimer_percpu_timer_init(np, 0);
- else if (pa == 0x48036000) /* dra7 dmtimer4 */
+ else if (pa == 0x4882e000) /* dra7 dmtimer16 */
return dmtimer_percpu_timer_init(np, 1);
return 0;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 646ad385e490..ccac1c453080 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -358,7 +358,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
* other namespaces.
*/
if ((current_user_ns() != &init_user_ns) ||
- (task_active_pid_ns(current) != &init_pid_ns))
+ !task_is_in_init_pid_ns(current))
return;
/* Can only change if privileged. */
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 7e0957eea094..869894b74741 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -90,10 +90,8 @@ struct counter_device *counter_alloc(size_t sizeof_priv)
int err;
ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL);
- if (!ch) {
- err = -ENOMEM;
- goto err_alloc_ch;
- }
+ if (!ch)
+ return NULL;
counter = &ch->counter;
dev = &counter->dev;
@@ -123,9 +121,8 @@ err_chrdev_add:
err_ida_alloc:
kfree(ch);
-err_alloc_ch:
- return ERR_PTR(err);
+ return NULL;
}
EXPORT_SYMBOL_GPL(counter_alloc);
@@ -208,12 +205,12 @@ struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv
int err;
counter = counter_alloc(sizeof_priv);
- if (IS_ERR(counter))
- return counter;
+ if (!counter)
+ return NULL;
err = devm_add_action_or_reset(dev, devm_counter_put, counter);
if (err < 0)
- return ERR_PTR(err);
+ return NULL;
return counter;
}
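
A minimal caller sketch (hypothetical driver, including a hypothetical struct example_priv) showing the convention the hunks above establish: both allocators now return NULL on failure rather than an ERR_PTR.

static int example_counter_probe(struct platform_device *pdev)
{
	struct counter_device *counter;

	/* was: if (IS_ERR(counter)) return PTR_ERR(counter); */
	counter = devm_counter_alloc(&pdev->dev, sizeof(struct example_priv));
	if (!counter)
		return -ENOMEM;

	/* ... set counter->name, counter->ops, signals, counts ... */

	return devm_counter_add(&pdev->dev, counter);
}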
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
index 7cc4d1d523ea..04eac41dad33 100644
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c
@@ -19,6 +19,11 @@
#include "counter-sysfs.h"
+static inline struct counter_device *counter_from_dev(struct device *dev)
+{
+ return container_of(dev, struct counter_device, dev);
+}
+
/**
* struct counter_attribute - Counter sysfs attribute
* @dev_attr: device attribute for sysfs
@@ -90,7 +95,7 @@ static ssize_t counter_comp_u8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u8 data = 0;
@@ -122,7 +127,7 @@ static ssize_t counter_comp_u8_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
bool bool_data = 0;
u8 data = 0;
@@ -158,7 +163,7 @@ static ssize_t counter_comp_u32_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
const struct counter_available *const avail = a->comp.priv;
int err;
u32 data = 0;
@@ -221,7 +226,7 @@ static ssize_t counter_comp_u32_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
struct counter_count *const count = a->parent;
struct counter_synapse *const synapse = a->comp.priv;
const struct counter_available *const avail = a->comp.priv;
@@ -281,7 +286,7 @@ static ssize_t counter_comp_u64_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
@@ -309,7 +314,7 @@ static ssize_t counter_comp_u64_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 647505957d4f..35f38ae67fb1 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -27,6 +27,10 @@ TRACE_EVENT(amd_pstate_perf,
TP_PROTO(unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity,
+ u64 freq,
+ u64 mperf,
+ u64 aperf,
+ u64 tsc,
unsigned int cpu_id,
bool changed,
bool fast_switch
@@ -35,6 +39,10 @@ TRACE_EVENT(amd_pstate_perf,
TP_ARGS(min_perf,
target_perf,
capacity,
+ freq,
+ mperf,
+ aperf,
+ tsc,
cpu_id,
changed,
fast_switch
@@ -44,6 +52,10 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long, min_perf)
__field(unsigned long, target_perf)
__field(unsigned long, capacity)
+ __field(unsigned long long, freq)
+ __field(unsigned long long, mperf)
+ __field(unsigned long long, aperf)
+ __field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
__field(bool, changed)
__field(bool, fast_switch)
@@ -53,15 +65,23 @@ TRACE_EVENT(amd_pstate_perf,
__entry->min_perf = min_perf;
__entry->target_perf = target_perf;
__entry->capacity = capacity;
+ __entry->freq = freq;
+ __entry->mperf = mperf;
+ __entry->aperf = aperf;
+ __entry->tsc = tsc;
__entry->cpu_id = cpu_id;
__entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
+ (unsigned long long)__entry->freq,
+ (unsigned long long)__entry->mperf,
+ (unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
(__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9ce75ed11f8e..7be38bc6a673 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -66,6 +66,18 @@ MODULE_PARM_DESC(shared_mem,
static struct cpufreq_driver amd_pstate_driver;
/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
* struct amd_cpudata - private CPU data for AMD P-State
* @cpu: CPU number
* @req: constraint request to apply
@@ -81,6 +93,9 @@ static struct cpufreq_driver amd_pstate_driver;
* @min_freq: the frequency that mapped to lowest_perf
* @nominal_freq: the frequency that mapped to nominal_perf
* @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
* @boost_supported: check whether the Processor or SBIOS supports boost mode
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
@@ -102,6 +117,10 @@ struct amd_cpudata {
u32 nominal_freq;
u32 lowest_nonlinear_freq;
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
bool boost_supported;
};
@@ -211,6 +230,39 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
max_perf, fast_switch);
}
+static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+{
+ u64 aperf, mperf, tsc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = rdtsc();
+
+ if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
+ local_irq_restore(flags);
+ return false;
+ }
+
+ local_irq_restore(flags);
+
+ cpudata->cur.aperf = aperf;
+ cpudata->cur.mperf = mperf;
+ cpudata->cur.tsc = tsc;
+ cpudata->cur.aperf -= cpudata->prev.aperf;
+ cpudata->cur.mperf -= cpudata->prev.mperf;
+ cpudata->cur.tsc -= cpudata->prev.tsc;
+
+ cpudata->prev.aperf = aperf;
+ cpudata->prev.mperf = mperf;
+ cpudata->prev.tsc = tsc;
+
+ cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
+
+ return true;
+}
+
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
@@ -226,8 +278,11 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
- trace_amd_pstate_perf(min_perf, des_perf, max_perf,
- cpudata->cpu, (value != prev), fast_switch);
+ if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
+ trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
+ cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
+ cpudata->cpu, (value != prev), fast_switch);
+ }
if (value == prev)
return;
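
A standalone arithmetic check (plain C, not kernel code) of the frequency estimate amd_pstate_sample() feeds into the trace event, using made-up sample deltas:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t delta_aperf = 3000000;	/* hypothetical counts over one sample */
	uint64_t delta_mperf = 2000000;
	uint64_t cpu_khz = 2400000;	/* 2.4 GHz nominal */

	/* freq = delta_aperf * cpu_khz / delta_mperf */
	uint64_t freq_khz = delta_aperf * cpu_khz / delta_mperf;

	printf("estimated frequency: %llu kHz\n",
	       (unsigned long long)freq_khz);	/* prints 3600000, i.e. 3.6 GHz */
	return 0;
}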
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8d95536ee22..80f535cc8a75 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 08515f7e515f..b6bd0ff35323 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -146,7 +146,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -161,7 +161,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -177,7 +177,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -195,7 +195,7 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 63f7c219062b..0d42cf8b88d8 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
/*
- * store_sampling_rate - update sampling rate effective immediately if needed.
+ * sampling_rate_store - update sampling rate effective immediately if needed.
*
* If new rate is smaller than the old, simply updating
* dbs.sampling_rate might not be appropriate. For example, if the
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
* This must be called with dbs_data->mutex held, otherwise traversing
* policy_dbs_list isn't safe.
*/
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -80,7 +80,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
return count;
}
-EXPORT_SYMBOL_GPL(store_sampling_rate);
+EXPORT_SYMBOL_GPL(sampling_rate_store);
/**
* gov_update_cpu_data - Update CPU load data.
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bab8e6140377..a5a0bc3cc23e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -51,7 +51,7 @@ static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
}
#define gov_show_one(_gov, file_name) \
-static ssize_t show_##file_name \
+static ssize_t file_name##_show \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
@@ -60,7 +60,7 @@ static ssize_t show_##file_name \
}
#define gov_show_one_common(file_name) \
-static ssize_t show_##file_name \
+static ssize_t file_name##_show \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
@@ -68,12 +68,10 @@ static ssize_t show_##file_name \
}
#define gov_attr_ro(_name) \
-static struct governor_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
+static struct governor_attr _name = __ATTR_RO(_name)
#define gov_attr_rw(_name) \
-static struct governor_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+static struct governor_attr _name = __ATTR_RW(_name)
/* Common to all CPUs of a policy */
struct policy_dbs_info {
@@ -176,7 +174,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
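
The rename only pays off because of how the generic sysfs attribute macros are built; a simplified expansion (illustration only, not the real <linux/sysfs.h> definition):

/* __ATTR_RW(_name) builds the attribute from _name##_show and
 * _name##_store, so the governor callbacks must follow that naming
 * before gov_attr_rw() can rely on it. */
#define EXAMPLE_ATTR_RW(_name) {				\
	.attr  = { .name = #_name, .mode = 0644 },		\
	.show  = _name##_show,					\
	.store = _name##_store,					\
}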
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index a6f365b9cc1a..771770ea0ed0 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -8,11 +8,6 @@
#include "cpufreq_governor.h"
-static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
-{
- return container_of(kobj, struct gov_attr_set, kobj);
-}
-
static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
return container_of(attr, struct governor_attr, attr);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a41ea4729b8..e8fbf970ff07 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -202,7 +202,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
return count;
}
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -237,7 +237,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -265,7 +265,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -290,7 +290,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index bc7f7e6759bd..846bb3a78788 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1692,6 +1692,37 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
}
}
+static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
+{
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+ /*
+	 * If this CPU generation doesn't require a change to the
+	 * balance_perf EPP, return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
+	 * If the power-up EPP is something other than the chipset default 0x80 and
+	 * - is more performance oriented than 0x80 (the default balance_perf EPP)
+	 * - but less performance oriented than the performance EPP,
+	 * then use it as the new balance_perf EPP.
+ */
+ if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+ cpudata->epp_default > HWP_EPP_PERFORMANCE) {
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
+ return;
+ }
+
+ /*
+	 * Use the hard-coded per-generation value to update the balance_perf
+ * and default EPP.
+ */
+ cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+ intel_pstate_set_epp(cpudata, cpudata->epp_default);
+}
+
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
@@ -1705,12 +1736,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
if (cpudata->epp_default >= 0)
return;
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
- cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
- } else {
- cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
- intel_pstate_set_epp(cpudata, cpudata->epp_default);
- }
+ intel_pstate_update_epp_defaults(cpudata);
}
static int atom_get_min_pstate(void)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c538a153ee82..3e000e1a75c6 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -668,9 +668,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
u32 nesting_level,
void *context, void **return_value)
{
- struct acpi_device *d;
+ struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle);
- if (acpi_bus_get_device(obj_handle, &d))
+ if (!d)
return 0;
*return_value = acpi_driver_data(d);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 12ab4014af71..d289036beff2 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1172,14 +1172,14 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;
+ if (!x86_match_cpu(powernow_k8_ids))
+ return -ENODEV;
+
if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
- if (!x86_match_cpu(powernow_k8_ids))
- return -ENODEV;
-
cpus_read_lock();
for_each_online_cpu(i) {
smp_call_function_single(i, check_supported_cpu, &ret, 1);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 05f3d7876e44..effbb680b453 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
- IRQF_ONESHOT, data->irq_name, data);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
if (ret) {
dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
return 0;
@@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ enable_irq(data->throttle_irq);
+}
+
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_scaling_boost_freqs,
@@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
+ .ready = qcom_cpufreq_ready,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
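
The IRQF_NO_AUTOEN/.ready pairing above follows a generic pattern; a hedged sketch with hypothetical names, not the driver's code:

#include <linux/interrupt.h>

static int example_setup_throttle_irq(struct device *dev, int irq,
				      irq_handler_t thread_fn, void *data)
{
	/* IRQF_NO_AUTOEN leaves the line disabled after the request, so
	 * the handler cannot run against a half-initialised policy; the
	 * matching enable_irq() happens from the cpufreq .ready hook. */
	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_ONESHOT | IRQF_NO_AUTOEN,
					 "example-dcvsh", data);
}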
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index fcc53215bac8..3a39a7f48b77 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -108,11 +108,11 @@ static int __init haltpoll_init(void)
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- cpuidle_poll_state_init(drv);
-
if (!kvm_para_available() || !haltpoll_want())
return -ENODEV;
+ cpuidle_poll_state_init(drv);
+
ret = cpuidle_register_driver(drv);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4f705674f94f..7b2d138bc83e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -808,6 +808,16 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hardware accelerator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_SHA3
+ help
+	  Xilinx ZynqMP has a SHA3 engine used for secure hash calculation.
+	  This driver interfaces with the SHA3 hardware engine.
+ Select this if you want to use the ZynqMP module
+ for SHA3 hash computation.
+
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 1fe5120eb966..0a4fff23d272 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
-obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
+obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += keembay/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 54ae8d16e493..35e3cadccac2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
flow = rctx->flow;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
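
The same local_bh_disable()/local_bh_enable() wrapping recurs in the other crypto hunks below; a generic sketch of the pattern, where example_run_hardware() is a hypothetical helper standing in for the driver-specific work:

static int example_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err = example_run_hardware(req);	/* hypothetical helper */

	/* complete with bottom halves off, as the completion callback expects */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
	return 0;
}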
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 88194718a806..859b7522faaa 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(buf);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9ef1c85c4aaa..554e400d41ca 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sun8i_ss_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 80e89066dbd1..319fe3279a71 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -30,6 +30,8 @@
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 3c073eb3db03..1a71ed49d233 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(pad);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index c6865cbd334b..e79514fce731 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine,
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = meson_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index fe0558403191..f72c6b3e4ad8 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2509,6 +2509,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 1b13f601fd95..d1628112dacc 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2508,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x510:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index e30786ec9f2d..9fd7b8e439d2 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1130,6 +1130,7 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xf00) {
+ case 0x800:
case 0x700:
dd->caps.has_dma = 1;
dd->caps.has_cfb_3keys = 1;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 2e9c0d214363..9e7308e39b30 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitmap.h>
#include <linux/workqueue.h>
#include "nitrox_csr.h"
@@ -120,6 +121,7 @@ static void pf2vf_resp_handler(struct work_struct *work)
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
+ DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
struct nitrox_vfdev *vfdev;
struct pf2vf_work *pfwork;
u64 value, reg_addr;
@@ -129,7 +131,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(0..63) */
reg_addr = NPS_PKT_MBOX_INT_LO;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
@@ -151,7 +154,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(64..127) */
reg_addr = NPS_PKT_MBOX_INT_HI;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
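
A standalone illustration (plain C, not kernel code) of why the 64-bit CSR is copied into a bitmap first: iterating the raw u64 as an unsigned long only covers 32 bits on 32-bit kernels, so VFs 32-63 would be missed.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t csr = (1ULL << 3) | (1ULL << 40);	/* VF3 and VF40 pending */
	unsigned int bit;

	for (bit = 0; bit < 64; bit++)			/* must walk all 64 bits */
		if (csr & (1ULL << bit))
			printf("mailbox interrupt from VF %u\n", bit);
	return 0;
}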
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index ed174883c8e3..6bf088bcdd11 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -440,7 +440,7 @@ struct aqmq_command_s {
/**
* struct ctx_hdr - Book keeping data about the crypto context
* @pool: Pool used to allocate crypto context
- * @dma: Base DMA address of the cypto context
+ * @dma: Base DMA address of the crypto context
* @ctx_dma: Actual usable crypto context for NITROX
*/
struct ctx_hdr {
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 812b4ac9afd6..dc5b7bf7e1fd 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -55,6 +55,11 @@ static const struct pci_device_id zip_id_table[] = {
{ 0, }
};
+static void zip_debugfs_init(void);
+static void zip_debugfs_exit(void);
+static int zip_register_compression_device(void);
+static void zip_unregister_compression_device(void);
+
void zip_reg_write(u64 val, u64 __iomem *addr)
{
writeq(val, addr);
@@ -235,6 +240,15 @@ static int zip_init_hw(struct zip_device *zip)
return 0;
}
+static void zip_reset(struct zip_device *zip)
+{
+ union zip_cmd_ctl cmd_ctl;
+
+ cmd_ctl.u_reg64 = 0x0ull;
+ cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
+ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+}
+
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -282,8 +296,21 @@ static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_release_regions;
+ /* Register with the Kernel Crypto Interface */
+ err = zip_register_compression_device();
+ if (err < 0) {
+ zip_err("ZIP: Kernel Crypto Registration failed\n");
+ goto err_register;
+ }
+
+ /* comp-decomp statistics are handled with debugfs interface */
+ zip_debugfs_init();
+
return 0;
+err_register:
+ zip_reset(zip);
+
err_release_regions:
if (zip->reg_base)
iounmap(zip->reg_base);
@@ -305,16 +332,17 @@ err_free_device:
static void zip_remove(struct pci_dev *pdev)
{
struct zip_device *zip = pci_get_drvdata(pdev);
- union zip_cmd_ctl cmd_ctl;
int q = 0;
if (!zip)
return;
+ zip_debugfs_exit();
+
+ zip_unregister_compression_device();
+
if (zip->reg_base) {
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+ zip_reset(zip);
iounmap(zip->reg_base);
}
@@ -585,7 +613,7 @@ DEFINE_SHOW_ATTRIBUTE(zip_regs);
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
-static void __init zip_debugfs_init(void)
+static void zip_debugfs_init(void)
{
if (!debugfs_initialized())
return;
@@ -604,7 +632,7 @@ static void __init zip_debugfs_init(void)
}
-static void __exit zip_debugfs_exit(void)
+static void zip_debugfs_exit(void)
{
debugfs_remove_recursive(zip_debugfs_root);
}
@@ -615,48 +643,7 @@ static void __exit zip_debugfs_exit(void) { }
#endif
/* debugfs - end */
-static int __init zip_init_module(void)
-{
- int ret;
-
- zip_msg("%s\n", DRV_NAME);
-
- ret = pci_register_driver(&zip_driver);
- if (ret < 0) {
- zip_err("ZIP: pci_register_driver() failed\n");
- return ret;
- }
-
- /* Register with the Kernel Crypto Interface */
- ret = zip_register_compression_device();
- if (ret < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_pci_unregister;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return ret;
-
-err_pci_unregister:
- pci_unregister_driver(&zip_driver);
- return ret;
-}
-
-static void __exit zip_cleanup_module(void)
-{
- zip_debugfs_exit();
-
- /* Unregister from the kernel crypto interface */
- zip_unregister_compression_device();
-
- /* Unregister this driver for pci zip devices */
- pci_unregister_driver(&zip_driver);
-}
-
-module_init(zip_init_module);
-module_exit(zip_cleanup_module);
+module_pci_driver(zip_driver);
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
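
For reference, a simplified illustration of what module_pci_driver() generates (not the real <linux/pci.h> definition), which is why the crypto registration and debugfs setup had to migrate into zip_probe()/zip_remove() above:

#define EXAMPLE_MODULE_PCI_DRIVER(__pci_driver)				\
static int __init __pci_driver##_init(void)				\
{									\
	return pci_register_driver(&(__pci_driver));			\
}									\
module_init(__pci_driver##_init);					\
static void __exit __pci_driver##_exit(void)				\
{									\
	pci_unregister_driver(&(__pci_driver));				\
}									\
module_exit(__pci_driver##_exit)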
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e6dcd8cedd53..bed331953ff9 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -69,7 +69,6 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
@@ -104,9 +103,7 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_aes_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d718db224be4..7d4b4ad1db1f 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
return 0;
}
+static void ccp_dma_release(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+ tasklet_kill(&chan->cleanup_tasklet);
+ list_del_rcu(&dma_chan->device_node);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
return 0;
err_reg:
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
err_cache:
@@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
return;
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8fd774a10edc..6ab93dfd478a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -413,7 +413,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret;
+ int rc, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a5e041d9d2cf..11e0278c8631 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
{
int ret = 0;
+ if (!nbytes) {
+ *mapped_nents = 0;
+ *lbytes = 0;
+ *nents = 0;
+ return 0;
+ }
+
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 78833491f534..309da6334a0a 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
&ctx_p->user.key_dma_addr);
/* Free key buffer in context */
- kfree_sensitive(ctx_p->user.key);
dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ kfree_sensitive(ctx_p->user.key);
}
struct tdes_keys {
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index c1c2b1d86663..14d0d83d388d 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -23,8 +23,8 @@ static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
- struct scatterlist *in_sg = areq->src;
- struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
struct scatterlist *sg;
if (areq->cryptlen == 0 || areq->cryptlen % 16) {
@@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sl3516_ce_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index c5b84a5ea350..453390044181 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -3840,7 +3840,7 @@ static void qm_clear_queues(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_resetting)
+ if (qp->is_in_kernel && qp->is_resetting)
memset(qp->qdma.va, 0, qp->qdma.size);
}
@@ -4295,7 +4295,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
static int qm_vf_read_qos(struct hisi_qm *qm)
{
int cnt = 0;
- int ret;
+ int ret = -EINVAL;
/* reset mailbox qos val */
qm->mb_qos = 0;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 6a45bd23b363..a91635c348b5 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -42,6 +42,8 @@
#define SEC_DE_OFFSET_V3 9
#define SEC_SCENE_OFFSET_V3 5
#define SEC_CKEY_OFFSET_V3 13
+#define SEC_CTR_CNT_OFFSET 25
+#define SEC_CTR_CNT_ROLLOVER 2
#define SEC_SRC_SGL_OFFSET_V3 11
#define SEC_DST_SGL_OFFSET_V3 14
#define SEC_CALG_OFFSET_V3 4
@@ -63,6 +65,7 @@
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
+#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
#define SEC_PBUF_SZ 512
@@ -237,7 +240,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
- pr_err("err bd type [%d]\n", type);
+ pr_err("err bd type [%u]\n", type);
return;
}
@@ -641,13 +644,15 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
c_ctx->fallback = false;
+
+	/* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
return 0;
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc fallback tfm!\n");
+ pr_err("failed to alloc xts mode fallback tfm!\n");
return PTR_ERR(c_ctx->fbtfm);
}
@@ -808,7 +813,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback) {
+ if (c_ctx->fallback && c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1300,6 +1305,10 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC;
sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
+	/* Set the CTR counter mode to 128-bit rollover */
+ sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
+ SEC_CTR_CNT_OFFSET);
+
if (req->use_pbuf) {
bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
@@ -1614,7 +1623,7 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
} else {
- sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
}
sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
@@ -2032,13 +2041,12 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
struct skcipher_request *sreq, bool encrypt)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
struct device *dev = ctx->dev;
int ret;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
-
if (!c_ctx->fbtfm) {
- dev_err(dev, "failed to check fallback tfm\n");
+ dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
return -EINVAL;
}
@@ -2219,6 +2227,10 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
if (c_mode == SEC_CMODE_CCM) {
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
+ dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ return -EINVAL;
+ }
ret = aead_iv_demension_check(req);
if (ret) {
dev_err(dev, "aead input iv param error!\n");
@@ -2256,7 +2268,6 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
req->cryptlen <= authsize))) {
- dev_err(dev, "Kunpeng920 not support 0 length!\n");
ctx->a_ctx.fallback = true;
return -EINVAL;
}
@@ -2284,9 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
struct aead_request *aead_req,
bool encrypt)
{
- struct aead_request *subreq = aead_request_ctx(aead_req);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
+ struct aead_request *subreq;
+ int ret;
/* Kunpeng920 aead mode not support input 0 size */
if (!a_ctx->fallback_aead_tfm) {
@@ -2294,6 +2306,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
return -EINVAL;
}
+ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
+ if (!subreq)
+ return -ENOMEM;
+
aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
aead_request_set_callback(subreq, aead_req->base.flags,
aead_req->base.complete, aead_req->base.data);
@@ -2301,8 +2317,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
aead_req->cryptlen, aead_req->iv);
aead_request_set_ad(subreq, aead_req->assoclen);
- return encrypt ? crypto_aead_encrypt(subreq) :
- crypto_aead_decrypt(subreq);
+ if (encrypt)
+ ret = crypto_aead_encrypt(subreq);
+ else
+ ret = crypto_aead_decrypt(subreq);
+ aead_request_free(subreq);
+
+ return ret;
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 9f71c358a6d3..5e039b50e9d4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -354,8 +354,10 @@ struct sec_sqe3 {
* akey_len: 9~14 bits
* a_alg: 15~20 bits
* key_sel: 21~24 bits
- * updata_key: 25 bits
- * reserved: 26~31 bits
+ * ctr_count_mode/sm4_xts: 25~26 bits
+ * sva_prefetch: 27 bits
+ * key_wrap_num: 28~30 bits
+ * update_key: 31 bits
*/
__le32 auth_mac_key;
__le32 salt;
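The ctr_count_mode field sits in bits 25~26 of auth_mac_key, which is what SEC_CTR_CNT_OFFSET and SEC_CTR_CNT_ROLLOVER encode in sec_skcipher_bd_fill_v3() above. Below is a minimal standalone sketch of that bit composition, assuming only the layout documented in this comment; the printed value is purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define SEC_CTR_CNT_OFFSET	25	/* ctr_count_mode/sm4_xts: bits 25~26 */
#define SEC_CTR_CNT_ROLLOVER	2	/* 128-bit counter rollover encoding */

int main(void)
{
	uint32_t auth_mac_key = 0;

	/* Place the rollover selector in bits 25~26, as the BD fill code does */
	auth_mac_key |= (uint32_t)SEC_CTR_CNT_ROLLOVER << SEC_CTR_CNT_OFFSET;

	printf("auth_mac_key = 0x%08x\n", (unsigned int)auth_mac_key); /* 0x04000000 */
	return 0;
}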
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 26d3ab1d308b..0b9906ff69e3 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -90,6 +90,10 @@
SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
+#define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220
+#define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224
+#define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5))
+#define SEC_USER1_SMMU_MASK_V3 0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_PREFETCH_CFG 0x301130
@@ -335,6 +339,41 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static void sec_engine_sva_config(struct hisi_qm *qm)
+{
+ u32 reg;
+
+ if (qm->ver > QM_HW_V2) {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ reg &= SEC_USER1_SMMU_MASK_V3;
+ reg |= SEC_USER1_SMMU_NORMAL_V3;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ } else {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ }
+}
+
static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
@@ -426,26 +465,18 @@ static int sec_engine_init(struct hisi_qm *qm)
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
-
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
- reg &= SEC_USER1_SMMU_MASK;
- if (qm->use_sva && qm->ver == QM_HW_V2)
- reg |= SEC_USER1_SMMU_SVA;
- else
- reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
+ sec_engine_sva_config(qm);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
- /* Enable sm4 extra mode, as ctr/ecb */
- writel_relaxed(SEC_BD_ERR_CHK_EN0,
- qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+	/* HW V2 enables sm4 extra mode, such as ctr/ecb */
+ if (qm->ver < QM_HW_V3)
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 9125199f1702..a48591af12d0 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -47,6 +47,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic
Accelerator Unit(CPT) found in OcteonTX2 series of processors.
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index ccbef01888d4..01c48ddc4eeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1639,11 +1639,8 @@ static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
- struct cpt_device_desc desc;
- desc = *ldesc;
- *ldesc = *rdesc;
- *rdesc = desc;
+ swap(*ldesc, *rdesc);
}
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index b681bd2dc6ad..36d72e35ebeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -204,7 +204,6 @@ static int alloc_command_queues(struct otx_cptvf *cptvf,
/* per queue initialization */
for (i = 0; i < cptvf->num_queues; i++) {
- c_size = 0;
rem_q_size = q_size;
first = NULL;
last = NULL;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index fb56824cb0a6..5012b7e669f0 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -157,5 +157,6 @@ struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 9074876d38e5..a317319696ef 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -202,3 +202,17 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
+
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(mbox, 0);
+ err = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index b691b6c1d5c4..4fcaf61a70e3 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -26,12 +26,22 @@
*/
#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+/*
+ * LDWB is getting incorrectly used when IQB_LDWB = 1 and the CPT instruction
+ * queue has fewer than 320 free entries. As a workaround, increase the HW
+ * instruction queue size by 320 entries and expose 320 fewer entries to SW/NIX RX.
+ */
+#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40)
+
/* CPT instruction queue length in bytes */
-#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
- OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_INST_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
+ OTX2_CPT_INST_QLEN_EXTRA_BYTES)
/* CPT instruction group queue length in bytes */
-#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
+#define OTX2_CPT_INST_GRP_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)
/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128
@@ -179,7 +189,8 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
- lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
+ OTX2_CPT_EXTRA_SIZE_DIV40;
otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}
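The workaround keeps the queue length advertised to software (OTX2_CPT_INST_QLEN_MSGS) based on the original OTX2_CPT_SIZE_DIV40 while growing the hardware queue by 320 instructions. A rough standalone sketch of the arithmetic follows; the SIZE_DIV40 and instruction-size values are placeholder assumptions for illustration, not the driver's actual constants.

#include <stdio.h>

#define INST_SIZE	64		/* assumed CPT instruction size in bytes */
#define SIZE_DIV40	936		/* assumed base queue size, in units of 40 */
#define EXTRA_DIV40	(320 / 40)	/* 320 extra HW entries, same units */

int main(void)
{
	unsigned long sw_msgs  = (SIZE_DIV40 - 1) * 40;		/* entries advertised to SW */
	unsigned long hw_bytes = (SIZE_DIV40 * 40 * INST_SIZE) +	/* base HW queue */
				 (320 * INST_SIZE);			/* + 320-entry headroom */
	unsigned long grp_bytes = (SIZE_DIV40 + EXTRA_DIV40) * 16;	/* group queue */

	printf("SW entries: %lu, HW queue bytes: %lu, group queue bytes: %lu\n",
	       sw_msgs, hw_bytes, grp_bytes);
	return 0;
}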
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 05b2d9c650e1..936174b012e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -46,6 +46,7 @@ struct otx2_cptpf_dev {
struct workqueue_struct *flr_wq;
struct cptpf_flr_work *flr_work;
+ struct mutex lock; /* serialize mailbox access */
unsigned long cap_flag;
u8 pf_id; /* RVU PF number */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 1720a5bb7016..a402ccfac557 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -140,10 +140,13 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
vf = flr_work - pf->flr_work;
+ mutex_lock(&pf->lock);
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
- if (!req)
+ if (!req) {
+ mutex_unlock(&pf->lock);
return;
+ }
req->sig = OTX2_MBOX_REQ_SIG;
req->id = MBOX_MSG_VF_FLR;
@@ -151,16 +154,19 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+ if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
- if (vf >= 64) {
- reg = 1;
- vf = vf - 64;
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
- /* Clear transaction pending register */
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ mutex_unlock(&pf->lock);
}
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
@@ -468,6 +474,7 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
goto error;
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ mutex_init(&cptpf->lock);
return 0;
error:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 186f1c1190c1..dee0aa60b698 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -18,9 +18,12 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg;
int ret;
+ mutex_lock(&cptpf->lock);
msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
- if (msg == NULL)
+ if (msg == NULL) {
+ mutex_unlock(&cptpf->lock);
return -ENOMEM;
+ }
memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
(uint8_t *)req + sizeof(struct mbox_msghdr), size);
@@ -29,15 +32,19 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
msg->sig = req->sig;
msg->ver = req->ver;
- otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
- ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
+	/* Error code -EIO indicates a communication failure with the AF.
+	 * Any other error code means the AF processed the VF messages and
+	 * set the error codes in the response messages (if any), so simply
+	 * forward the responses to the VF.
+ */
if (ret == -EIO) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ dev_warn(&cptpf->pdev->dev,
+ "AF not responding to VF%d messages\n", vf->vf_id);
+ mutex_unlock(&cptpf->lock);
return ret;
- } else if (ret) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
- return -EFAULT;
}
+ mutex_unlock(&cptpf->lock);
return 0;
}
@@ -204,6 +211,10 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
if (err == -ENOMEM || err == -EIO)
break;
offset = msg->next_msgoff;
+		/* Write barrier required for VF responses which are handled by the
+		 * PF driver and not forwarded to the AF.
+ */
+ smp_wmb();
}
/* Send mbox responses to VF */
if (mdev->num_msgs)
@@ -350,6 +361,8 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
process_afpf_mbox_msg(cptpf, msg);
offset = msg->next_msgoff;
+		/* Ensure the VF response is ready to be sent */
+ smp_wmb();
mdev->msgs_acked++;
}
otx2_mbox_reset(afpf_mbox, 0);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 4c8ebdf671ca..9cba2f714c7e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1076,6 +1076,39 @@ static void delete_engine_grps(struct pci_dev *pdev,
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}
+#define PCI_DEVID_CN10K_RNM 0xA098
+#define RNM_ENTROPY_STATUS 0x8
+
+static void rnm_to_cpt_errata_fixup(struct device *dev)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int timeout = 5000;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto put_pdev;
+
+ while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ dev_warn(dev, "RNM is not producing entropy\n");
+ break;
+ }
+ }
+
+ iounmap(base);
+
+put_pdev:
+ pci_dev_put(pdev);
+}
+
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
@@ -1111,6 +1144,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
+ u64 reg_val;
int ret = 0;
mutex_lock(&eng_grps->lock);
@@ -1189,9 +1223,17 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
if (is_dev_otx2(pdev))
goto unlock;
+
+ /*
+ * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
+ * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
+ */
+ rnm_to_cpt_errata_fixup(&pdev->dev);
+
/*
* Configure engine group mask to allow context prefetching
- * for the groups.
+	 * for the groups and enable random number requests, so that CPT
+	 * can request random numbers from the RNM.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
@@ -1203,6 +1245,18 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+
+ /*
+	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW errata: when
+	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
+	 * encounters a fault/poison, a rare case may result in
+	 * unpredictable data being delivered to a CPT engine.
+ */
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
+ BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+
mutex_unlock(&eng_grps->lock);
return 0;
@@ -1753,7 +1807,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
char engs_info[2 * OTX2_CPT_NAME_LENGTH];
struct otx2_cpt_eng_grp_info *grp;
struct otx2_cpt_engs_rsvd *engs;
- u32 mask[4];
int i, j;
pr_debug("Engine groups global info");
@@ -1785,6 +1838,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
+ u32 mask[5] = { };
+
get_engs_info(grp, engs_info,
2 * OTX2_CPT_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 2748a3327e39..f8f8542ce3e4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void)
{
int i, err = 0;
- if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
- for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
- otx2_cpt_skciphers[i].base.cra_flags &=
- ~CRYPTO_ALG_DEAD;
-
- err = crypto_register_skciphers(otx2_cpt_skciphers,
- ARRAY_SIZE(otx2_cpt_skciphers));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d19e5ffb5104..d6f9e2fe863d 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
}
- for_each_sg(req->src, src, sg_nents(src), i) {
+ for_each_sg(req->src, src, sg_nents(req->src), i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 4e304f6081e4..7584a34ba88c 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -962,7 +962,7 @@ static struct attribute *nx842_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nx842_attribute_group = {
+static const struct attribute_group nx842_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = nx842_sysfs_entries,
};
@@ -992,7 +992,7 @@ static struct attribute *nxcop_caps_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nxcop_caps_attr_group = {
+static const struct attribute_group nxcop_caps_attr_group = {
.name = "nx_gzip_caps",
.attrs = nxcop_caps_sysfs_entries,
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a196bb8b1701..581211a92628 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1093,7 +1093,7 @@ static struct attribute *omap_aes_attrs[] = {
NULL,
};
-static struct attribute_group omap_aes_attr_group = {
+static const struct attribute_group omap_aes_attr_group = {
.attrs = omap_aes_attrs,
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index f6bf53c00b61..4b37dc69a50c 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2045,7 +2045,7 @@ static struct attribute *omap_sham_attrs[] = {
NULL,
};
-static struct attribute_group omap_sham_attr_group = {
+static const struct attribute_group omap_sham_attr_group = {
.attrs = omap_sham_attrs,
};
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6d10edc40aca..fb5970a68484 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -52,7 +53,7 @@ static const char *const dev_cfg_services[] = {
static int get_service_enabled(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- u32 ret;
+ int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
@@ -229,7 +230,7 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
void __iomem *csr = misc_bar->virt_addr;
/* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
@@ -256,19 +257,19 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
/* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
- csr |= ADF_4XXX_PM_SOU;
- ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
/* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
/* Poll status register to make sure the device is powered up */
ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_4XXX_PM_INIT_STATE,
- ADF_4XXX_PM_POLL_DELAY_US,
- ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
- ADF_4XXX_PM_STATUS);
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
if (ret)
dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
@@ -354,6 +355,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 12e4fb9b40ce..1034752845ca 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -39,20 +39,6 @@
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
#define ADF_4XXX_NUM_BANKS_PER_VF 4
-/* Error source registers */
-#define ADF_4XXX_ERRSOU0 (0x41A200)
-#define ADF_4XXX_ERRSOU1 (0x41A204)
-#define ADF_4XXX_ERRSOU2 (0x41A208)
-#define ADF_4XXX_ERRSOU3 (0x41A20C)
-
-/* Error source mask registers */
-#define ADF_4XXX_ERRMSK0 (0x41A210)
-#define ADF_4XXX_ERRMSK1 (0x41A214)
-#define ADF_4XXX_ERRMSK2 (0x41A218)
-#define ADF_4XXX_ERRMSK3 (0x41A21C)
-
-#define ADF_4XXX_VFLNOTIFY BIT(7)
-
/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
#define ADF_4XXX_ARB_OFFSET (0x0)
@@ -63,16 +49,6 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-/* Power management */
-#define ADF_4XXX_PM_POLL_DELAY_US 20
-#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
-#define ADF_4XXX_PM_STATUS (0x50A00C)
-#define ADF_4XXX_PM_INTERRUPT (0x50A028)
-#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
-#define ADF_4XXX_PM_INIT_STATE BIT(21)
-/* Power management source in ERRSOU2 and ERRMSK2 */
-#define ADF_4XXX_PM_SOU BIT(18)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index a6c78b9c730b..fa4c350c1bf9 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -75,6 +75,13 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+ */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 7e191a42a5c7..f25a6c8edfc7 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -12,6 +12,7 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_gen2_hw_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_pm.o \
qat_crypto.o \
qat_algs.o \
qat_asym_algs.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2d4cd7c7cf33..a03c6cf72331 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -184,6 +184,8 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
int (*init_device)(struct adf_accel_dev *accel_dev);
+ int (*enable_pm)(struct adf_accel_dev *accel_dev);
+ bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 498eb6f690e3..3b6184c35081 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -251,6 +251,43 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
+/**
+ * adf_init_admin_pm() - Send the PM init message to the FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ * 000 - 64us
+ * 001 - 128us
+ * 010 - 256us
+ * 011 - 512us
+ * 100 - 1ms
+ * 101 - 2ms
+ * 110 - 4ms
+ * 111 - 8ms
+ *
+ * This function sends the admin init message to the FW to configure the
+ * PM state.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp = {0};
+ struct icp_qat_fw_init_admin_req req = {0};
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ if (!accel_dev->admin) {
+ dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+ return -EFAULT;
+ }
+
+ req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+ req.idle_filter = idle_delay;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
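The idle_delay encoding documented above is a 3-bit value whose step doubles from 64us up to roughly 8ms. The helper below is hypothetical, not part of the driver: it maps a requested idle time in microseconds onto that encoding, rounding up to the next supported step, and only assumes the table given in the kernel-doc.

#include <stdint.h>
#include <stdio.h>

static uint32_t idle_us_to_filter(unsigned int us)
{
	uint32_t filter = 0;
	unsigned int step = 64;	/* encoding 0 corresponds to 64us */

	while (filter < 7 && step < us) {
		step *= 2;	/* each encoding doubles the idle time */
		filter++;
	}
	return filter;		/* 0..7, suitable as idle_delay */
}

int main(void)
{
	printf("64us -> %u, 500us -> %u, 8ms -> %u\n",
	       idle_us_to_filter(64), idle_us_to_filter(500),
	       idle_us_to_filter(8192));
	return 0;
}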
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 76f4f96ec5eb..e8c9b77c0d66 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -102,6 +102,7 @@ void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -188,6 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, u32 mem_size, char *obj_name);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6f64aa693146..e8ac932bbaab 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -419,6 +419,9 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_chr_drv_create())
goto err_chr_dev;
+ if (adf_init_misc_wq())
+ goto err_misc_wq;
+
if (adf_init_aer())
goto err_aer;
@@ -440,6 +443,8 @@ err_vf_wq:
err_pf_wq:
adf_exit_aer();
err_aer:
+ adf_exit_misc_wq();
+err_misc_wq:
adf_chr_drv_destroy();
err_chr_dev:
mutex_destroy(&adf_ctl_lock);
@@ -449,6 +454,7 @@ err_chr_dev:
static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
+ adf_exit_misc_wq();
adf_exit_aer();
adf_exit_vf_wq();
adf_exit_pf_wq();
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index f0f71ca44ca3..43b8f864806b 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -122,6 +122,20 @@ do { \
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0 (0x41A200)
+#define ADF_GEN4_ERRSOU1 (0x41A204)
+#define ADF_GEN4_ERRSOU2 (0x41A208)
+#define ADF_GEN4_ERRSOU3 (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0 (0x41A210)
+#define ADF_GEN4_ERRMSK1 (0x41A214)
+#define ADF_GEN4_ERRMSK2 (0x41A218)
+#define ADF_GEN4_ERRMSK3 (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY BIT(7)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
index 8efbedf63bc8..d80d493a7756 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -9,15 +9,12 @@
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_MAX_NUM_VFS 16
-
#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
/* VF2PF interrupt source registers */
-#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
-#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
-#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
+#define ADF_4XXX_VM2PF_SOU 0x41A180
+#define ADF_4XXX_VM2PF_MSK 0x41A1C0
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
@@ -41,51 +38,30 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
{
- int i;
u32 sou, mask;
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- u32 vf_mask = 0;
- for (i = 0; i < num_csrs; i++) {
- sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
- mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
- sou &= ~mask;
- vf_mask |= sou << i;
- }
+ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
- return vf_mask;
+ return sou & ~mask;
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
new file mode 100644
index 000000000000..7037c0892a8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+ PM_NO_CHANGE = 0,
+ PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+ struct work_struct pm_irq_work;
+ struct adf_accel_dev *accel_dev;
+ u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 msg;
+
+ msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+ if (msg & ADF_GEN4_PM_MSG_PENDING)
+ return -EBUSY;
+
+ /* Send HOST_MSG */
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+ msg |= ADF_GEN4_PM_MSG_PENDING;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+ /* Poll status register to make sure the HOST_MSG has been processed */
+ return read_poll_timeout(ADF_CSR_RD, msg,
+ !(msg & ADF_GEN4_PM_MSG_PENDING),
+ ADF_GEN4_PM_MSG_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+ ADF_GEN4_PM_HOST_MSG);
+}
+
+static void pm_bh_handler(struct work_struct *work)
+{
+ struct adf_gen4_pm_data *pm_data =
+ container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+ struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 pm_int_sts = pm_data->pm_int_sts;
+ u32 val;
+
+ /* PM Idle interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+ /* Issue host message to FW */
+ if (send_host_msg(accel_dev))
+ dev_warn_ratelimited(&GET_DEV(accel_dev),
+ "Failed to send host msg to FW\n");
+ }
+
+ /* Clear interrupt status */
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+ /* Reenable PM interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_gen4_pm_data *pm_data = NULL;
+ u32 errsou2;
+ u32 errmsk2;
+ u32 val;
+
+ /* Only handle the interrupt triggered by PM */
+ errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ if (errmsk2 & ADF_GEN4_PM_SOU)
+ return false;
+
+ errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+ if (!(errsou2 & ADF_GEN4_PM_SOU))
+ return false;
+
+ /* Disable interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+ pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+ if (!pm_data)
+ return false;
+
+ pm_data->pm_int_sts = val;
+ pm_data->accel_dev = accel_dev;
+
+ INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+ adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ int ret;
+ u32 val;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Enable default PM interrupts: IDLE, THROTTLE */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+ val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+ /* Clear interrupt status */
+ val |= ADF_GEN4_PM_INT_STS_MASK;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+ /* Unmask PM Interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
new file mode 100644
index 000000000000..f8f8a9ee29e5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US 20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE BIT(20)
+#define ADF_GEN4_PM_INIT_STATE BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT (ADF_GEN4_PM_IDLE_INT_EN | \
+ ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS BIT(0)
+#define ADF_GEN4_PM_IDLE_STS BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+ ADF_GEN4_PM_IDLE_STS | \
+ ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif
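send_host_msg() in adf_gen4_pm.c packs PM_SET_MIN into the HOST_MSG payload field (bits 28:1) and then sets the pending bit (bit 0). Here is a minimal sketch of that composition using plain shifts instead of the kernel's FIELD_PREP(); nothing beyond the masks defined above is assumed about the register.

#include <stdint.h>
#include <stdio.h>

#define PM_MSG_PENDING		(1u << 0)		/* BIT(0) */
#define PM_MSG_PAYLOAD_SHIFT	1			/* payload occupies bits 28:1 */
#define PM_MSG_PAYLOAD_MASK	(0x0FFFFFFFu << 1)	/* GENMASK(28, 1) */
#define PM_SET_MIN		1

int main(void)
{
	uint32_t msg = ((uint32_t)PM_SET_MIN << PM_MSG_PAYLOAD_SHIFT) &
		       PM_MSG_PAYLOAD_MASK;

	msg |= PM_MSG_PENDING;	/* tell the FW a host message is waiting */
	printf("ADF_GEN4_PM_HOST_MSG value = 0x%08x\n", (unsigned int)msg); /* 0x00000003 */
	return 0;
}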
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 2edc63c6b6ca..c2c718f1b489 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -181,6 +181,12 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
+ /* Enable Power Management */
+ if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+ return -EFAULT;
+ }
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index 4ca482aa69f7..a35149f8bf1e 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,6 +16,7 @@
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
+static struct workqueue_struct *adf_misc_wq;
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
@@ -123,6 +124,17 @@ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
}
#endif /* CONFIG_PCI_IOV */
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (hw_data->handle_pm_interrupt &&
+ hw_data->handle_pm_interrupt(accel_dev))
+ return true;
+
+ return false;
+}
+
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
@@ -133,6 +145,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
+ if (adf_handle_pm_int(accel_dev))
+ return IRQ_HANDLED;
+
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
@@ -341,3 +356,30 @@ err_out:
return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * This function initializes the general-purpose workqueue 'qat_misc_wq'.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+ adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+ if (adf_misc_wq)
+ destroy_workqueue(adf_misc_wq);
+
+ adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+ return queue_work(adf_misc_wq, work);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
index 14b222691c9c..1141258db4b6 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
@@ -96,7 +96,7 @@ int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct capabilities_v3 cap_msg = { { 0 }, };
+ struct capabilities_v3 cap_msg = { 0 };
unsigned int len = sizeof(cap_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
@@ -141,7 +141,7 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
{
- struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ struct ring_to_svc_map_v1 rts_map_msg = { 0 };
unsigned int len = sizeof(rts_map_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index afe59a7684ac..56cb827f93ea 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
};
enum icp_qat_fw_init_admin_resp_status {
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 7234c4940fae..67c9588e89df 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -161,6 +161,13 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+ */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 2026cc6be8f0..6356402a2c9e 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -387,7 +387,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
page = image->page;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
- if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+ unsigned long ae_assigned = uof_image->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
if (!test_bit(ae, &cfg_ae_mask))
@@ -664,8 +666,9 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
continue;
for (i = 0; i < obj_handle->uimage_num; i++) {
- if (!test_bit(ae, (unsigned long *)
- &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+ unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
mflag = 1;
if (qat_uclo_init_ae_data(obj_handle, ae, i))
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 99ba8d51d102..11f30fd48c14 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
{
unsigned int currsize = 0;
u32 val;
+ int ret;
/* read random data from hardware */
do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
+ ret = readl_poll_timeout(rng->base + PRNG_STATUS, val,
+ val & PRNG_STATUS_DATA_AVAIL,
+ 200, 10000);
+ if (ret)
+ return ret;
val = readl_relaxed(rng->base + PRNG_DATA_OUT);
if (!val)
- break;
+ return -EINVAL;
if ((max - currsize) >= WORD_SZ) {
memcpy(data, &val, WORD_SZ);
@@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
- break;
}
} while (currsize < max);
- return currsize;
+ return 0;
}
static int qcom_rng_generate(struct crypto_rng *tfm,
@@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
- return 0;
+ return ret;
}
static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 1cece1a7d3f0..5bbf0d2722e1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.exit = rk_ablk_exit_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 97277b7150cb..5a57c9afd8c8 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1264,7 +1264,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
dev_dbg(dev, "[%s]", __func__);
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 51a6e1a42434..5157c118d642 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1658,7 +1658,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index c85fab7ef0bd..b2c28b87f14b 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -2,7 +2,11 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_GHASH
+ select CRYPTO_XTS
default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 534e32daf76a..730feff5b5f2 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
new file mode 100644
index 000000000000..43ff170ff1c2
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SHA Driver.
+ * Copyright (c) 2022 Xilinx Inc.
+ */
+#include <linux/cacheflush.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
+
+enum zynqmp_sha_op {
+ ZYNQMP_SHA3_INIT = 1,
+ ZYNQMP_SHA3_UPDATE = 2,
+ ZYNQMP_SHA3_FINAL = 4,
+};
+
+struct zynqmp_sha_drv_ctx {
+ struct shash_alg sha3_384;
+ struct device *dev;
+};
+
+struct zynqmp_sha_tfm_ctx {
+ struct device *dev;
+ struct crypto_shash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct shash_desc fbk_req;
+};
+
+static dma_addr_t update_dma_addr, final_dma_addr;
+static char *ubuf, *fbuf;
+
+static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+{
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *fallback_tfm;
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm))
+ return PTR_ERR(fallback_tfm);
+
+ tfm_ctx->fbk_tfm = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
+
+ return 0;
+}
+
+static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+{
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+}
+
+static int zynqmp_sha_init(struct shash_desc *desc)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_init(&dctx->fbk_req);
+}
+
+static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_update(&dctx->fbk_req, data, length);
+}
+
+static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_final(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_finup(&dctx->fbk_req, data, length, out);
+}
+
+static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_import(&dctx->fbk_req, in);
+}
+
+static int zynqmp_sha_export(struct shash_desc *desc, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_export(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ unsigned int remaining_len = len;
+ int update_size;
+ int ret;
+
+ ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+ if (ret)
+ return ret;
+
+ while (remaining_len != 0) {
+ memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ } else {
+ update_size = remaining_len;
+ remaining_len = 0;
+ }
+ memcpy(ubuf, data, update_size);
+ flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
+ ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
+ if (ret)
+ return ret;
+
+ data += update_size;
+ }
+
+ ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
+ memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+ return ret;
+}
+
+static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+ .sha3_384 = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .init_tfm = zynqmp_sha_init_tfm,
+ .exit_tfm = zynqmp_sha_exit_tfm,
+ .descsize = sizeof(struct zynqmp_sha_desc_ctx),
+ .statesize = sizeof(struct sha3_state),
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "zynqmp-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+ u32 v;
+
+ /* Verify the hardware is present */
+ err = zynqmp_pm_get_api_version(&v);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
+ if (err < 0) {
+ dev_err(dev, "Failed to register shash alg.\n");
+ return err;
+ }
+
+ sha3_drv_ctx.dev = dev;
+ platform_set_drvdata(pdev, &sha3_drv_ctx);
+
+ ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
+ if (!ubuf) {
+ err = -ENOMEM;
+ goto err_shash;
+ }
+
+ fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
+ if (!fbuf) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ return 0;
+
+err_mem:
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+
+err_shash:
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ sha3_drv_ctx.dev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+ dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-sha3-384",
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index d33a0613ed0c..5494d745ced5 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -346,8 +346,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
}
static const struct address_space_operations dev_dax_aops = {
- .set_page_dirty = __set_page_dirty_no_writeback,
- .invalidatepage = noop_invalidatepage,
+ .dirty_folio = noop_dirty_folio,
};
static int dax_open(struct inode *inode, struct file *filp)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e3029389d809..aedc15eabeac 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -282,7 +282,7 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
struct dax_device *dax_dev;
struct inode *inode;
- dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+ dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
if (!dax_dev)
return NULL;
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 56bf5ad01ad5..8f5848aa144f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -14,6 +14,7 @@
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
return -EINVAL;
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
/* Get the kernel ioctl cmd that matches */
kcmd = dma_heap_ioctl_cmds[nr];
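array_index_nospec() re-clamps nr after the architectural bounds check so the CPU cannot use an out-of-range index under speculation. The user-space sketch below only illustrates the check-then-sanitize pattern; the clamp is a stand-in for the kernel helper, which uses a branchless mask rather than a conditional.

#include <stddef.h>
#include <stdio.h>

static size_t index_sanitize(size_t idx, size_t size)
{
	/* Stand-in only: forces out-of-range indices to 0 */
	return idx < size ? idx : 0;
}

int main(void)
{
	const char *cmds[] = { "ALLOC", "IMPORT" };
	size_t nr = 1;

	if (nr >= sizeof(cmds) / sizeof(cmds[0]))
		return 1;	/* architectural bounds check */

	nr = index_sanitize(nr, sizeof(cmds) / sizeof(cmds[0]));
	printf("cmd: %s\n", cmds[nr]);	/* the kernel version is also safe under speculation */
	return 0;
}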
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a1da2b4b6d73..1476156af74b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
__func__, atchan->irq_status);
if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
- !(atchan->irq_status & error_mask))
+ !(atchan->irq_status & error_mask)) {
+ spin_unlock_irq(&atchan->lock);
return;
+ }
if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 110de8a60058..858400e42ec0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2968,7 +2968,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
pm_runtime_force_suspend(dev);
- amba_pclk_unprepare(pcdev);
+ clk_unprepare(pcdev->pclk);
return 0;
}
@@ -2978,7 +2978,7 @@ static int __maybe_unused pl330_resume(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
int ret;
- ret = amba_pclk_prepare(pcdev);
+ ret = clk_prepare(pcdev->pclk);
if (ret)
return ret;
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 8a6bf291a73f..daafea5bc35d 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
- goto e_dma_alloc;
+ goto e_destroy_pool;
}
cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
- if (ret)
- goto e_pool;
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
- goto e_dmaengine;
+ goto e_free_irq;
/* Set up debugfs entries */
ptdma_debugfs_setup(pt);
return 0;
-e_dmaengine:
+e_free_irq:
free_irq(pt->pt_irq, pt);
-e_dma_alloc:
+e_free_dma:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
-e_pool:
- dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
dma_pool_destroy(pt->cmd_q.dma_pool);
return ret;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 481f45c77ce1..13d12d660cc2 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 158e5e7defae..b26ed690f03c 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0)
+ if (ret < 0) {
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+ pm_runtime_put(schan->dev);
+ }
pm_runtime_barrier(schan->dev);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a42164389ebc..d5d55732adba 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
if (ret)
- goto err_clk;
+ goto pm_disable;
return 0;
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_clk:
clk_disable_unprepare(stm32_dmamux->clk);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3a6d2416cb0f..e7e8e624a436 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"No irq %d in DT\n", irq);
- return -ENODEV;
+ return irq;
}
/* Arria10 has a 2nd IRQ */
@@ -1083,8 +1083,46 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
#ifdef CONFIG_EDAC_ALTERA_SDRAM
+/*
+ * A legacy U-Boot bug only enabled memory mapped access to the ECC Enable
+ * register if ECC is enabled. Linux checks the ECC Enable register to
+ * determine ECC status.
+ * Use an SMC call (which always works) to determine ECC enablement.
+ */
+static int altr_s10_sdram_check_ecc_deps(struct altr_edac_device_dev *device)
+{
+ const struct edac_device_prv_data *prv = device->data;
+ unsigned long sdram_ecc_addr;
+ struct arm_smccc_res result;
+ struct device_node *np;
+ phys_addr_t sdram_addr;
+ u32 read_reg;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl");
+ if (!np)
+ goto sdram_err;
+
+ sdram_addr = of_translate_address(np, of_get_address(np, 0,
+ NULL, NULL));
+ of_node_put(np);
+ sdram_ecc_addr = (unsigned long)sdram_addr + prv->ecc_en_ofst;
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sdram_ecc_addr,
+ 0, 0, 0, 0, 0, 0, &result);
+ read_reg = (unsigned int)result.a1;
+ ret = (int)result.a0;
+ if (!ret && (read_reg & prv->ecc_enable_mask))
+ return 0;
+
+sdram_err:
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: No ECC present or ECC disabled.\n",
+ device->edac_dev_name);
+ return -ENODEV;
+}
+
static const struct edac_device_prv_data s10_sdramecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = altr_s10_sdram_check_ecc_deps,
.ce_clear_mask = ALTR_S10_ECC_SERRPENA,
.ue_clear_mask = ALTR_S10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_S10_ECC_EN,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index fba609ada0e6..812baa48b290 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,6 +15,21 @@ static struct msr __percpu *msrs;
static struct amd64_family_type *fam_type;
+static inline u32 get_umc_reg(u32 reg)
+{
+ if (!fam_type->flags.zn_regs_v2)
+ return reg;
+
+ switch (reg) {
+ case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
+ case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
+ case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
+ }
+
+ WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
+ return 0;
+}
+
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
@@ -1429,8 +1444,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
- if (pvt->dram_type == MEM_LRDDR4) {
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
+ if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
+ amd_smn_read(pvt->mc_node_id,
+ umc_base + get_umc_reg(UMCCH_ADDR_CFG),
+ &tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
@@ -1505,7 +1522,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
- pvt->csels[umc].m_cnt = 2;
+ pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
}
} else {
@@ -1545,7 +1562,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
- umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+ umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
@@ -1616,19 +1633,49 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
+static void determine_memory_type_df(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i;
+
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
+ umc->dram_type = MEM_EMPTY;
+ continue;
+ }
+
+ /*
+ * Check if the system supports the "DDR Type" field in UMC Config
+ * and has DDR5 DIMMs in use.
+ */
+ if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR5;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR5;
+ else
+ umc->dram_type = MEM_DDR5;
+ } else {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR4;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR4;
+ else
+ umc->dram_type = MEM_DDR4;
+ }
+
+ edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
+ }
+}
+
static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
- if (pvt->umc) {
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
- }
+ if (pvt->umc)
+ return determine_memory_type_df(pvt);
switch (pvt->fam) {
case 0xf:
@@ -2149,6 +2196,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
{
u32 addr_mask_orig, addr_mask_deinterleaved;
u32 msb, weight, num_zero_bits;
+ int cs_mask_nr = csrow_nr;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -2164,17 +2212,33 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
return size;
/*
- * There is one mask per DIMM, and two Chip Selects per DIMM.
- * CS0 and CS1 -> DIMM0
- * CS2 and CS3 -> DIMM1
+ * Family 17h introduced systems with one mask per DIMM,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 and CS1 -> MASK0 / DIMM0
+ * CS2 and CS3 -> MASK1 / DIMM1
+ *
+ * Family 19h Model 10h introduced systems with one mask per Chip Select,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 -> MASK0 -> DIMM0
+ * CS1 -> MASK1 -> DIMM0
+ * CS2 -> MASK2 -> DIMM1
+ * CS3 -> MASK3 -> DIMM1
+ *
+ * Keep the mask number equal to the Chip Select number for newer systems,
+ * and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
+ if (!fam_type->flags.zn_regs_v2)
+ cs_mask_nr >>= 1;
+
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
- addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+ addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -2930,6 +2994,7 @@ static struct amd64_family_type family_types[] = {
.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
+ .flags.zn_regs_v2 = 1,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -3368,7 +3433,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
@@ -3452,7 +3517,9 @@ skip:
read_dct_base_mask(pvt);
determine_memory_type(pvt);
- edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
+
+ if (!pvt->umc)
+ edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
}
@@ -3548,7 +3615,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
pvt->mc_node_id, cs);
dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
- dimm->mtype = pvt->dram_type;
+ dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
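The long comment above reduces to a single shift: with one mask per DIMM (pre-zn_regs_v2) two chip selects share a mask, so the mask index is the chip-select number divided by two; with one mask per chip select (zn_regs_v2) the two numbers are equal. A standalone sketch in plain C, just to make the mapping explicit (the function name is illustrative, not the driver's):

#include <assert.h>
#include <stdbool.h>

/* Mask index for a given chip select, per the comment above. */
static unsigned int cs_to_mask_nr(unsigned int csrow_nr, bool zn_regs_v2)
{
	/* zn_regs_v2: one mask per chip select -> identity mapping. */
	if (zn_regs_v2)
		return csrow_nr;

	/* Older UMCs: two chip selects share one mask -> divide by two. */
	return csrow_nr >> 1;
}

int main(void)
{
	/* Family 17h style: CS0/CS1 -> MASK0, CS2/CS3 -> MASK1 */
	assert(cs_to_mask_nr(0, false) == 0 && cs_to_mask_nr(1, false) == 0);
	assert(cs_to_mask_nr(2, false) == 1 && cs_to_mask_nr(3, false) == 1);

	/* Family 19h Model 10h style: CSn -> MASKn */
	assert(cs_to_mask_nr(3, true) == 3);
	return 0;
}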
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 352bda9803f6..38e5ad95d010 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -273,8 +273,11 @@
#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
#define UMCCH_ADDR_MASK_SEC 0x28
+#define UMCCH_ADDR_MASK_SEC_DDR5 0x30
#define UMCCH_ADDR_CFG 0x30
+#define UMCCH_ADDR_CFG_DDR5 0x40
#define UMCCH_DIMM_CFG 0x80
+#define UMCCH_DIMM_CFG_DDR5 0x90
#define UMCCH_UMC_CFG 0x100
#define UMCCH_SDP_CTRL 0x104
#define UMCCH_ECC_CTRL 0x14C
@@ -344,6 +347,9 @@ struct amd64_umc {
u32 sdp_ctrl; /* SDP Control reg */
u32 ecc_ctrl; /* DRAM ECC Control reg */
u32 umc_cap_hi; /* Capabilities High reg */
+
+ /* cache the dram_type */
+ enum mem_type dram_type;
};
struct amd64_pvt {
@@ -391,7 +397,12 @@ struct amd64_pvt {
/* place to store error injection parameters prior to issue */
struct error_injection injection;
- /* cache the dram_type */
+ /*
+ * cache the dram_type
+ *
+ * NOTE: Don't use this for Family 17h and later.
+ * Use dram_type in struct amd64_umc instead.
+ */
enum mem_type dram_type;
struct amd64_umc *umc; /* UMC registers */
@@ -480,11 +491,22 @@ struct low_ops {
unsigned cs_mode, int cs_mask_nr);
};
+struct amd64_family_flags {
+ /*
+ * Indicates that the system supports the new register offsets, etc.
+ * first introduced with Family 19h Model 10h.
+ */
+ __u64 zn_regs_v2 : 1,
+
+ __reserved : 63;
+};
+
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
+ struct amd64_family_flags flags;
struct low_ops ops;
};
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 5e7593753799..9a61d92bdf42 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -163,13 +163,14 @@ CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
/* Base Attributes of the EDAC_DEVICE ECC object */
-static struct ctl_info_attribute *device_ctrl_attr[] = {
- &attr_ctl_info_panic_on_ue,
- &attr_ctl_info_log_ue,
- &attr_ctl_info_log_ce,
- &attr_ctl_info_poll_msec,
+static struct attribute *device_ctrl_attrs[] = {
+ &attr_ctl_info_panic_on_ue.attr,
+ &attr_ctl_info_log_ue.attr,
+ &attr_ctl_info_log_ce.attr,
+ &attr_ctl_info_poll_msec.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_ctrl);
/*
* edac_device_ctrl_master_release
@@ -217,7 +218,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
static struct kobj_type ktype_device_ctrl = {
.release = edac_device_ctrl_master_release,
.sysfs_ops = &device_ctl_info_ops,
- .default_attrs = (struct attribute **)device_ctrl_attr,
+ .default_groups = device_ctrl_groups,
};
/*
@@ -389,17 +390,18 @@ INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
/* list of edac_dev 'instance' attributes */
-static struct instance_attribute *device_instance_attr[] = {
- &attr_instance_ce_count,
- &attr_instance_ue_count,
+static struct attribute *device_instance_attrs[] = {
+ &attr_instance_ce_count.attr,
+ &attr_instance_ue_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_instance);
/* The 'ktype' for each edac_dev 'instance' */
static struct kobj_type ktype_instance_ctrl = {
.release = edac_device_ctrl_instance_release,
.sysfs_ops = &device_instance_ops,
- .default_attrs = (struct attribute **)device_instance_attr,
+ .default_groups = device_instance_groups,
};
/* edac_dev -> instance -> block information */
@@ -487,17 +489,18 @@ BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
/* list of edac_dev 'block' attributes */
-static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
- &attr_block_ce_count,
- &attr_block_ue_count,
+static struct attribute *device_block_attrs[] = {
+ &attr_block_ce_count.attr,
+ &attr_block_ue_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_block);
/* The 'ktype' for each edac_dev 'block' */
static struct kobj_type ktype_block_ctrl = {
.release = edac_device_ctrl_block_release,
.sysfs_ops = &device_block_ops,
- .default_attrs = (struct attribute **)device_block_attr,
+ .default_groups = device_block_groups,
};
/* block ctor/dtor code */
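All three conversions in this file follow the same recipe used for the kernel-wide default_attrs removal: gather plain struct attribute pointers into a NULL-terminated <name>_attrs[] array, let ATTRIBUTE_GROUPS(<name>) generate <name>_group and <name>_groups, and point kobj_type::default_groups at the generated array. A condensed sketch of the pattern with made-up attribute names (the EDAC code wraps its attributes in larger structs, which is why the hunks above take .attr of each entry):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical show callback and attributes standing in for the EDAC ones. */
static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", attr->name);
}

static const struct sysfs_ops example_sysfs_ops = {
	.show = example_show,
};

static void example_release(struct kobject *kobj)
{
	/* Real code would free the object embedding the kobject here. */
}

static struct attribute example_foo_attr = { .name = "foo", .mode = 0444 };
static struct attribute example_bar_attr = { .name = "bar", .mode = 0444 };

/* 1. NULL-terminated array of plain struct attribute pointers... */
static struct attribute *example_attrs[] = {
	&example_foo_attr,
	&example_bar_attr,
	NULL,
};
/* 2. ...ATTRIBUTE_GROUPS(example) emits example_group and example_groups... */
ATTRIBUTE_GROUPS(example);

/* 3. ...which default_groups consumes instead of the removed default_attrs. */
static struct kobj_type example_ktype = {
	.release        = example_release,
	.sysfs_ops      = &example_sysfs_ops,
	.default_groups = example_groups,
};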
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9d9aabdec96b..d2715774af6f 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -213,12 +213,12 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else if (size > sizeof(char))
align = sizeof(short);
else
- return (char *)ptr;
+ return ptr;
- r = (unsigned long)p % align;
+ r = (unsigned long)ptr % align;
if (r == 0)
- return (char *)ptr;
+ return ptr;
*p += align - r;
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 53042af7262e..888d5728ecef 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -135,17 +135,18 @@ INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
/* pci instance attributes */
-static struct instance_attribute *pci_instance_attr[] = {
- &attr_instance_pe_count,
- &attr_instance_npe_count,
+static struct attribute *pci_instance_attrs[] = {
+ &attr_instance_pe_count.attr,
+ &attr_instance_npe_count.attr,
NULL
};
+ATTRIBUTE_GROUPS(pci_instance);
/* the ktype for a pci instance */
static struct kobj_type ktype_pci_instance = {
.release = edac_pci_instance_release,
.sysfs_ops = &pci_instance_ops,
- .default_attrs = (struct attribute **)pci_instance_attr,
+ .default_groups = pci_instance_groups,
};
/*
@@ -292,15 +293,16 @@ EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
/* Base Attributes of the memory ECC object */
-static struct edac_pci_dev_attribute *edac_pci_attr[] = {
- &edac_pci_attr_check_pci_errors,
- &edac_pci_attr_edac_pci_log_pe,
- &edac_pci_attr_edac_pci_log_npe,
- &edac_pci_attr_edac_pci_panic_on_pe,
- &edac_pci_attr_pci_parity_count,
- &edac_pci_attr_pci_nonparity_count,
+static struct attribute *edac_pci_attrs[] = {
+ &edac_pci_attr_check_pci_errors.attr,
+ &edac_pci_attr_edac_pci_log_pe.attr,
+ &edac_pci_attr_edac_pci_log_npe.attr,
+ &edac_pci_attr_edac_pci_panic_on_pe.attr,
+ &edac_pci_attr_pci_parity_count.attr,
+ &edac_pci_attr_pci_nonparity_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(edac_pci);
/*
* edac_pci_release_main_kobj
@@ -327,7 +329,7 @@ static void edac_pci_release_main_kobj(struct kobject *kobj)
static struct kobj_type ktype_edac_pci_main_kobj = {
.release = edac_pci_release_main_kobj,
.sysfs_ops = &edac_pci_sysfs_ops,
- .default_attrs = (struct attribute **)edac_pci_attr,
+ .default_groups = edac_pci_groups,
};
/**
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 2ccd1db5e98f..7197f9fa0245 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- rc = -EINVAL;
+ rc = irq;
goto out_err;
}
rc = devm_request_irq(&pdev->dev, irq,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 75cb91055c17..e5cfb01353d8 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
+ depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b406b3f78f46..d76bab3aaac4 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2112,7 +2112,7 @@ static void __exit scmi_driver_exit(void)
}
module_exit(scmi_driver_exit);
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index a7e762c352f9..1e1a51510e83 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1059,14 +1059,14 @@ static bool __init sdei_present_acpi(void)
return true;
}
-static int __init sdei_init(void)
+void __init sdei_init(void)
{
struct platform_device *pdev;
int ret;
ret = platform_driver_register(&sdei_driver);
if (ret || !sdei_present_acpi())
- return ret;
+ return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
0, NULL, 0);
@@ -1076,17 +1076,8 @@ static int __init sdei_init(void)
pr_info("Failed to register ACPI:SDEI platform device %d\n",
ret);
}
-
- return ret;
}
-/*
- * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
- * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
- * by device_initcall(). We want to be called in the middle.
- */
-subsys_initcall_sync(sdei_init);
-
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 4c3201e290e2..ea84108035eb 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -24,7 +24,7 @@ static bool dump_properties __initdata;
static int __init dump_properties_enable(char *arg)
{
dump_properties = true;
- return 0;
+ return 1;
}
__setup("dump_apple_properties", dump_properties_enable);
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 0ef086e43090..7e771c56c13c 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record)
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- preemptible(), record->size, record->psi->buf);
+ false, record->size, record->psi->buf);
if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
if (!schedule_work(&efivar_work))
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ae79c3300129..5502e176d51b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -212,7 +212,7 @@ static int __init efivar_ssdt_setup(char *str)
memcpy(efivar_ssdt, str, strlen(str));
else
pr_warn("efivar_ssdt: name too long: %s\n", str);
- return 0;
+ return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
@@ -722,6 +722,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
systab_hdr->revision >> 16,
systab_hdr->revision & 0xffff,
vendor);
+
+ if (IS_ENABLED(CONFIG_X86_64) &&
+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
+ !strcmp(vendor, "Apple")) {
+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
+ }
}
static __initdata char memory_type_name[][13] = {
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 2363fee9211c..9cc556013d08 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
- EFI_KIMG_ALIGN >> 10);
+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+ SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 380e4e251399..9c460843442f 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
static u32 hartid;
-static u32 get_boot_hartid_from_fdt(void)
+static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
@@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void)
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
- return U32_MAX;
+ return -EINVAL;
chosen_node = fdt_path_offset(fdt, "/chosen");
if (chosen_node < 0)
- return U32_MAX;
+ return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
if (!prop || len != sizeof(u32))
- return U32_MAX;
+ return -EINVAL;
- return fdt32_to_cpu(*prop);
+ hartid = fdt32_to_cpu(*prop);
+ return 0;
}
efi_status_t check_platform_features(void)
{
- hartid = get_boot_hartid_from_fdt();
- if (hartid == U32_MAX) {
+ int ret;
+
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
efi_err("/chosen/boot-hartid missing or invalid!\n");
return EFI_UNSUPPORTED;
}
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 38722d2009e2..5ed0602c2f75 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -359,4 +359,4 @@ static int __init efi_mokvar_sysfs_init(void)
}
return err;
}
-device_initcall(efi_mokvar_sysfs_init);
+fs_initcall(efi_mokvar_sysfs_init);
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index abdc8a6a3963..cae590bd08f2 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
{
const struct efivar_operations *ops;
efi_status_t status;
+ unsigned long varsize;
if (!__efivars)
return -EINVAL;
@@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
return efivar_entry_set_nonblocking(name, vendor, attributes,
size, data);
+ varsize = size + ucs2_strsize(name, 1024);
if (!block) {
if (down_trylock(&efivars_lock))
return -EBUSY;
+ status = check_var_size_nonblocking(attributes, varsize);
} else {
if (down_interruptible(&efivars_lock))
return -EINTR;
+ status = check_var_size(attributes, varsize);
}
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
up(&efivars_lock);
return -ENOSPC;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 450c5f6a1cbf..5e5b0bb2e4e0 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1121,6 +1121,32 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data, or address of the output buffer where
+ * the hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - to initialize the csudma driver and SHA3 (here the address
+ * and size inputs can be NULL).
+ * BIT(1) - to call the Sha3_Update API, which can be called multiple
+ * times when the data is not contiguous.
+ * BIT(2) - to get the final hash of the whole updated data.
+ * The hash is written back to the provided address as
+ * 48 bytes.
+ *
+ * Return: Status, either success or an error code.
+ */
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
+
+/**
* zynqmp_pm_register_notifier() - PM API for register a subsystem
* to be notified about specific
* event/error.
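The flag bits documented above imply an init/update/final sequence on the firmware side. The sketch below shows how a caller might drive zynqmp_pm_sha_hash() for a single contiguous buffer. The EXAMPLE_* flag names are illustrative (the real crypto driver has its own defines), the sketch assumes the prototype above is exported through the xlnx-zynqmp firmware header as the EXPORT_SYMBOL_GPL suggests, and it passes 0 as @size on the final step since the data has already been fed in, which the kernel-doc does not spell out:

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/firmware/xlnx-zynqmp.h>

/* Illustrative names for the documented flag bits (not the driver's macros). */
#define EXAMPLE_SHA3_INIT	BIT(0)
#define EXAMPLE_SHA3_UPDATE	BIT(1)
#define EXAMPLE_SHA3_FINAL	BIT(2)

/*
 * Hash one DMA-able buffer: initialize the engine, feed the data, then have
 * the firmware write the 48-byte SHA3-384 digest to @out_dma.
 */
static int example_sha3_384(dma_addr_t data_dma, u32 len, dma_addr_t out_dma)
{
	int ret;

	ret = zynqmp_pm_sha_hash(0, 0, EXAMPLE_SHA3_INIT);
	if (ret)
		return ret;

	ret = zynqmp_pm_sha_hash(data_dma, len, EXAMPLE_SHA3_UPDATE);
	if (ret)
		return ret;

	/* The firmware overwrites 48 bytes at @out_dma with the digest. */
	return zynqmp_pm_sha_hash(out_dma, 0, EXAMPLE_SHA3_FINAL);
}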
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4a55cdf089d6..e00c33310517 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -163,15 +163,13 @@ exit_destroy:
return ret;
}
-static int gen_74x164_remove(struct spi_device *spi)
+static void gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
gpiod_set_value_cansleep(chip->gpiod_oe, 0);
gpiochip_remove(&chip->gpio_chip);
mutex_destroy(&chip->lock);
-
- return 0;
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 869dc952cf45..0cb2664085cf 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- return gpiod_get_value(fwd->descs[offset]);
+ return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
+ : gpiod_get_value(fwd->descs[offset]);
}
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];
- error = gpiod_get_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
+ else
+ error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;
@@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- gpiod_set_value(fwd->descs[offset], value);
+ if (chip->can_sleep)
+ gpiod_set_value_cansleep(fwd->descs[offset], value);
+ else
+ gpiod_set_value(fwd->descs[offset], value);
}
static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
descs[j++] = fwd->descs[i];
}
- gpiod_set_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ else
+ gpiod_set_array_value(j, descs, NULL, values);
}
static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 51cd6f98d1c7..161c4751c5f7 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -443,14 +443,12 @@ static int max3191x_probe(struct spi_device *spi)
return 0;
}
-static int max3191x_remove(struct spi_device *spi)
+static void max3191x_remove(struct spi_device *spi)
{
struct max3191x_chip *max3191x = spi_get_drvdata(spi);
gpiochip_remove(&max3191x->gpio);
mutex_destroy(&max3191x->lock);
-
- return 0;
}
static int __init max3191x_register_driver(struct spi_driver *sdrv)
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 5862d73bf325..11813f41d460 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -64,11 +64,9 @@ static int max7301_probe(struct spi_device *spi)
return ret;
}
-static int max7301_remove(struct spi_device *spi)
+static void max7301_remove(struct spi_device *spi)
{
__max730x_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id max7301_id[] = {
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 31d2be1bebc8..cd9b16dbe1a9 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -134,7 +134,7 @@ exit_destroy:
return ret;
}
-static int mc33880_remove(struct spi_device *spi)
+static void mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
@@ -142,8 +142,6 @@ static int mc33880_remove(struct spi_device *spi)
gpiochip_remove(&mc->chip);
mutex_destroy(&mc->lock);
-
- return 0;
}
static struct spi_driver mc33880_driver = {
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index ccaad1cb3c2e..d8a26e503ca5 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -239,7 +239,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg->chip.offset = bank * MTK_BANK_WIDTH;
rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e099c39e0355..80ddc43fd875 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -986,7 +986,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
writel_relaxed(0, base + bank->regs->ctrl);
}
-static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
+static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
+ struct device *pm_dev)
{
struct gpio_irq_chip *irq;
static int gpio;
@@ -1052,6 +1053,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
if (ret)
return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
+ irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
@@ -1402,7 +1404,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
irqc->name = dev_name(&pdev->dev);
irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqc->parent_device = dev;
bank->irq = platform_get_irq(pdev, 0);
if (bank->irq <= 0) {
@@ -1466,7 +1467,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_mod_init(bank);
- ret = omap_gpio_chip_init(bank, irqc);
+ ret = omap_gpio_chip_init(bank, irqc, dev);
if (ret) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 8e04054cf07e..81a47ae09ff8 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -163,15 +163,13 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return 0;
}
-static int pisosr_gpio_remove(struct spi_device *spi)
+static void pisosr_gpio_remove(struct spi_device *spi)
{
struct pisosr_gpio *gpio = spi_get_drvdata(spi);
gpiochip_remove(&gpio->chip);
mutex_destroy(&gpio->lock);
-
- return 0;
}
static const struct spi_device_id pisosr_gpio_id_table[] = {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index bd2e16d6e21c..3a76538f27fa 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -530,7 +530,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip = &p->irq_chip;
irq_chip->name = "gpio-rcar";
- irq_chip->parent_device = dev;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
@@ -552,6 +551,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
+ irq_domain_set_pm_device(gpio_chip->irq.domain, dev);
ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler,
IRQF_SHARED, name, p);
if (ret) {
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index a4c4e4584f5b..099e358d2491 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
+ if (type == IRQ_TYPE_EDGE_BOTH) {
if (bank->gpio_type == GPIO_TYPE_V2) {
- bank->toggle_edge_mode &= ~mask;
rockchip_gpio_writel_bit(bank, d->hwirq, 1,
bank->gpio_regs->int_bothedge);
goto out;
@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
else
polarity |= mask;
}
- break;
- case IRQ_TYPE_EDGE_RISING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity &= ~mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity &= ~mask;
- break;
- default:
- ret = -EINVAL;
- goto out;
+ } else {
+ if (bank->gpio_type == GPIO_TYPE_V2) {
+ rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+ bank->gpio_regs->int_bothedge);
+ } else {
+ bank->toggle_edge_mode &= ~mask;
+ }
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ level |= mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ level |= mask;
+ polarity &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ level &= ~mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ level &= ~mask;
+ polarity &= ~mask;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
}
rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 403f9e833d6a..7d82388b4ab7 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
NULL,
chip->base + SIFIVE_GPIO_OUTPUT_EN,
chip->base + SIFIVE_GPIO_INPUT_EN,
- 0);
+ BGPIOF_READ_OUTPUT_REG_SET);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 838bbfed11d3..8e5d87984a48 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -547,7 +547,7 @@ struct gpio_sim_bank {
*
* So we need to store the pointer to the parent struct here. We can
* dereference it anywhere we need with no checks and no locking as
- * it's guaranteed to survive the childred and protected by configfs
+ * it's guaranteed to survive the children and protected by configfs
* locks.
*
* Same for other structures.
@@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
return container_of(group, struct gpio_sim_bank, group);
}
+static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
+{
+ return bank->label && *bank->label;
+}
+
static struct gpio_sim_device *
gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
{
@@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
* point the device doesn't exist yet and so dev_name()
* is not available.
*/
- hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s", dev->id,
- fwnode_get_name(bank->swnode));
+ if (gpio_sim_bank_has_label(bank))
+ hog->chip_label = kstrdup(bank->label,
+ GFP_KERNEL);
+ else
+ hog->chip_label = kasprintf(GFP_KERNEL,
+ "gpio-sim.%u-%s",
+ dev->id,
+ fwnode_get_name(
+ bank->swnode));
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
@@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
- if (bank->label)
+ if (gpio_sim_bank_has_label(bank))
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
@@ -1311,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item)
kfree(hog);
}
-struct configfs_item_operations gpio_sim_hog_config_item_ops = {
+static struct configfs_item_operations gpio_sim_hog_config_item_ops = {
.release = gpio_sim_hog_config_item_release,
};
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 34b36a8c035f..031fe105b58e 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
return offset + pin;
}
+#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
+
static void tegra186_irq_ack(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
base = tegra186_gpio_get_base(gpio, data->hwirq);
@@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data)
static void tegra186_irq_mask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data)
static void tegra186_irq_unmask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -1069,6 +1075,7 @@ static const struct tegra_gpio_soc tegra241_main_soc = {
.ports = tegra241_main_ports,
.name = "tegra241-gpio",
.instance = 0,
+ .num_irqs_per_bank = 8,
};
#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
@@ -1089,6 +1096,7 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.ports = tegra241_aon_ports,
.name = "tegra241-gpio-aon",
.instance = 1,
+ .num_irqs_per_bank = 8,
};
static const struct of_device_id tegra186_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 5b103221b58d..fa4bc7481f9a 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -281,7 +281,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
u8 irq_status;
irq_chip->name = chip->label;
- irq_chip->parent_device = &pdev->dev;
irq_chip->irq_mask = tqmx86_gpio_irq_mask;
irq_chip->irq_unmask = tqmx86_gpio_irq_unmask;
irq_chip->irq_set_type = tqmx86_gpio_irq_set_type;
@@ -316,6 +315,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
+ irq_domain_set_pm_device(girq->domain, dev);
+
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index d885032cf814..d918d2df4de2 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -1,7 +1,7 @@
/*
* Digital I/O driver for Technologic Systems I2C FPGA Core
*
- * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2015, 2018 Technologic Systems
* Copyright (C) 2016 Savoir-Faire Linux
*
* This program is free software; you can redistribute it and/or
@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
- /*
- * This will clear the output enable bit, the other bits are
- * dontcare when this is cleared
+ /* Only clear the OE bit here, requires a RMW. Prevents potential issue
+ * with OE and data getting to the physical pin at different times.
*/
- return regmap_write(priv->regmap, offset, 0);
+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
}
static int ts4900_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int reg;
int ret;
+ /* If changing from an input to an output, we need to first set the
+ * proper data bit to what is requested and then set OE bit. This
+ * prevents a glitch that can occur on the IO line
+ */
+ regmap_read(priv->regmap, offset, &reg);
+ if (!(reg & TS4900_GPIO_OE)) {
+ if (value)
+ reg = TS4900_GPIO_OUT;
+ else
+ reg &= ~TS4900_GPIO_OUT;
+
+ regmap_write(priv->regmap, offset, reg);
+ }
+
if (value)
ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
TS4900_GPIO_OUT);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c0f6a25c3279..a5495ad31c9c 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -307,7 +307,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
if (IS_ERR(desc))
return desc;
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
if (ret)
dev_warn(chip->parent,
"Failed to set debounce-timeout for pin 0x%04X, err %d\n",
@@ -1035,7 +1036,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
if (ret < 0)
return ret;
- ret = gpio_set_debounce_timeout(desc, info.debounce);
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
if (ret)
return ret;
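The factor of 10 in both hunks is a pure unit conversion: the ACPI DebounceTimeout field is expressed in hundredths of a millisecond (10 µs units), while gpio_set_debounce_timeout() takes microseconds, so an ACPI value of 100 corresponds to 1 ms and must be passed as 1000 µs. A trivial helper, shown only to make the arithmetic explicit (the name is illustrative):

/* Convert an ACPI DebounceTimeout value (units of 10 us) to microseconds. */
static inline unsigned int example_acpi_debounce_to_us(unsigned int acpi_units)
{
	return acpi_units * 10;	/* e.g. 100 -> 1000 us == 1 ms */
}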
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c7b5446d01fd..ffa0256cad5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_lh;
}
- ret = gpiod_request(desc, lh->label);
+ ret = gpiod_request_user(desc, lh->label);
if (ret)
goto out_free_lh;
lh->descs[i] = desc;
@@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
}
- ret = gpiod_request(desc, lr->label);
+ ret = gpiod_request_user(desc, lr->label);
if (ret)
goto out_free_linereq;
@@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- ret = gpiod_request(desc, le->label);
+ ret = gpiod_request_user(desc, le->label);
if (ret)
goto out_free_le;
le->desc = desc;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4098bc7f88b7..44c1ad51b3fe 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class,
* they may be undone on its behalf too.
*/
- status = gpiod_request(desc, "sysfs");
- if (status) {
- if (status == -EPROBE_DEFER)
- status = -ENODEV;
+ status = gpiod_request_user(desc, "sysfs");
+ if (status)
goto done;
- }
status = gpiod_set_transitory(desc, false);
if (!status) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3859911b61e9..6630d92e30ad 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2227,6 +2227,16 @@ static int gpio_set_bias(struct gpio_desc *desc)
return gpio_set_config_with_argument_optional(desc, bias, arg);
}
+/**
+ * gpio_set_debounce_timeout() - Set debounce timeout
+ * @desc: GPIO descriptor to set the debounce timeout
+ * @debounce: Debounce timeout in microseconds
+ *
+ * The function calls the corresponding GPIO driver to set the debounce
+ * timeout in the hardware.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
return gpio_set_config_with_argument_optional(desc,
@@ -3147,6 +3157,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
return retirq;
}
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ if (gc->irq.chip) {
+ /*
+ * Avoid race condition with other code, which tries to lookup
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ return -EPROBE_DEFER;
+ }
+#endif
return -ENXIO;
}
EXPORT_SYMBOL_GPL(gpiod_to_irq);
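From a consumer's point of view the new return value needs no special handling: a driver that maps a GPIO to an IRQ in its probe path can simply propagate the error and let the driver core retry once the gpiochip's irqchip has finished registering. A minimal consumer-side sketch, with hypothetical device and handler names:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(&pdev->dev, "irq", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* May be -EPROBE_DEFER while the gpiochip's irqchip is still registering. */
	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "no IRQ for GPIO\n");

	return devm_request_irq(&pdev->dev, irq, example_isr,
				IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), NULL);
}

dev_err_probe() stays quiet for -EPROBE_DEFER, so the deferred path does not spam the log.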
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 30bc3f80f83e..c31f4626915d 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -135,6 +135,18 @@ struct gpio_desc {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+
+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+{
+ int ret;
+
+ ret = gpiod_request(desc, label);
+ if (ret == -EPROBE_DEFER)
+ ret = -ENODEV;
+
+ return ret;
+}
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d8b854fcbffa..9a53a4de2bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4811b0faafd9..0e12315fa0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
}
}
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
* amdgpu_acpi_is_s0ix_active
*
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- if (adev->flags & AMD_IS_APU)
- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
}
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82011e75ed85..c4387b38229c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1141,7 +1141,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
- if (!dev->mode_config.allow_fb_modifiers) {
+ if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b21bcdc97460..0ead08ba58c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1525,6 +1525,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x99A0,
0x99A2,
0x99A4,
+ /* radeon secondary ids */
+ 0x3171,
+ 0x3e70,
+ 0x4164,
+ 0x4165,
+ 0x4166,
+ 0x4168,
+ 0x4170,
+ 0x4171,
+ 0x4172,
+ 0x4173,
+ 0x496e,
+ 0x4a69,
+ 0x4a6a,
+ 0x4a6b,
+ 0x4a70,
+ 0x4a74,
+ 0x4b69,
+ 0x4b6b,
+ 0x4b6c,
+ 0x4c6e,
+ 0x4e64,
+ 0x4e65,
+ 0x4e66,
+ 0x4e67,
+ 0x4e68,
+ 0x4e69,
+ 0x4e6a,
+ 0x4e71,
+ 0x4f73,
+ 0x5569,
+ 0x556b,
+ 0x556d,
+ 0x556f,
+ 0x5571,
+ 0x5854,
+ 0x5874,
+ 0x5940,
+ 0x5941,
+ 0x5b72,
+ 0x5b73,
+ 0x5b74,
+ 0x5b75,
+ 0x5d44,
+ 0x5d45,
+ 0x5d6d,
+ 0x5d6f,
+ 0x5d72,
+ 0x5d77,
+ 0x5e6b,
+ 0x5e6d,
+ 0x7120,
+ 0x7124,
+ 0x7129,
+ 0x712e,
+ 0x712f,
+ 0x7162,
+ 0x7163,
+ 0x7166,
+ 0x7167,
+ 0x7172,
+ 0x7173,
+ 0x71a0,
+ 0x71a1,
+ 0x71a3,
+ 0x71a7,
+ 0x71bb,
+ 0x71e0,
+ 0x71e1,
+ 0x71e2,
+ 0x71e6,
+ 0x71e7,
+ 0x71f2,
+ 0x7269,
+ 0x726b,
+ 0x726e,
+ 0x72a0,
+ 0x72a8,
+ 0x72b1,
+ 0x72b3,
+ 0x793f,
};
static const struct pci_device_id pciidlist[] = {
@@ -1930,6 +2011,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+ amdgpu_aspm = 0;
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -2165,13 +2249,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
if (amdgpu_device_supports_boco(drm_dev))
- return pm_runtime_suspended(dev) &&
- pm_suspend_via_firmware();
+ return pm_runtime_suspended(dev);
+
+ /* if we will not support s3 or s2i for the device
+ * then skip suspend
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c3f24069f2a..4655702a5e00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1904,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d99c8779b51e..5224d9a39737 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
int index)
{
struct drm_plane *plane;
- uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
&amdgpu_vkms_plane_funcs,
amdgpu_vkms_formats,
ARRAY_SIZE(amdgpu_vkms_formats),
- modifiers, type, NULL);
+ NULL, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b37fc7d7d2c7..418341a67517 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if eviction list is empty.
+ * True if VM is not evicting.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- return list_empty(&vm->evicted);
+ bool ret;
+
+ amdgpu_vm_eviction_lock(vm);
+ ret = !vm->evicting;
+ amdgpu_vm_eviction_unlock(vm);
+
+ return ret && list_empty(&vm->evicted);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index b4eddf6e98a6..ff738e9725ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
disabled_sa = tmp;
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 38bb42727715..a2f8ed0e6a64 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1140,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
+ return;
+
adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e8e4749e9c79..f0638db57111 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU saves SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_fini(adev);
}
@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU restores SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0fc1747e4a70..12f80fdc1fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
- !(adev->apu_flags & AMD_APU_IS_RAVEN2))
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
+ /*
+ * MMHUB PG needs to be disabled for Picasso for
+ * stability reasons.
+ */
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index ed5385137f48..e27ca3758762 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -24,6 +24,7 @@
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -224,7 +225,6 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- get_page(page);
lock_page(page);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index ea68f3b3a4e9..6d643b4b791d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -25,6 +25,7 @@
#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
+#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7f9773f8dab6..075429bea427 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
@@ -4256,6 +4256,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
#endif
+ /* Disable vblank IRQs aggressively for power-saving. */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4301,19 +4304,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
update_connector_ext_caps(aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
- /*
- * Disable vblank IRQs aggressively for power-saving.
- *
- * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
- * is also supported.
- */
- adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
-
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ff5bb152ef49..e6ef36de0825 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2033,10 +2033,10 @@ static void calculate_bandwidth(
kfree(surface_type);
free_tiling_mode:
kfree(tiling_mode);
-free_yclk:
- kfree(yclk);
free_sclk:
kfree(sclk);
+free_yclk:
+ kfree(yclk);
}
/*******************************************************************************
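
The dce_calcs hunk above reorders the cleanup labels so the frees unwind in the reverse of the allocation order. A minimal standalone sketch of that goto-unwind pattern, with placeholder allocation names and sizes:

#include <stdlib.h>

/* Labels must be ordered so each failure point jumps to a label that
 * frees exactly what was successfully allocated before it; the
 * allocations here are placeholders, not the driver's buffers. */
static int calculate_something(void)
{
	int ret = -1;
	int *yclk = malloc(3 * sizeof(*yclk));
	int *sclk, *tiling_mode, *surface_type;

	if (!yclk)
		return -1;

	sclk = malloc(3 * sizeof(*sclk));
	if (!sclk)
		goto free_yclk;

	tiling_mode = malloc(8 * sizeof(*tiling_mode));
	if (!tiling_mode)
		goto free_sclk;

	surface_type = malloc(8 * sizeof(*surface_type));
	if (!surface_type)
		goto free_tiling_mode;

	/* ... real work would go here ... */
	ret = 0;

	free(surface_type);
free_tiling_mode:
	free(tiling_mode);
free_sclk:
	free(sclk);
free_yclk:
	free(yclk);
	return ret;
}

int main(void)
{
	return calculate_something();
}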
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index ec19678a0702..e447c74be713 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu(
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
- dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index f977f29907df..10c7be40dfb0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -473,8 +473,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
/* Refresh bounding box */
+ DC_FP_START();
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
+ DC_FP_END();
}
static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 48005def1164..bc4ddc36fe58 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 7.95,
- .sr_enter_plus_exit_time_us = 9,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4162ce40089b..9d17c5a5ae01 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 6.09,
- .sr_enter_plus_exit_time_us = 7.14,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
}
@@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn31_bw_params.wm_table = lpddr5_wm_table;
} else {
- dcn31_bw_params.wm_table = ddr4_wm_table;
+ dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index a1011f3273f3..de3f4643eeef 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Result_Failed) {
- ASSERT(0);
+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ param == TABLE_WATERMARKS)
+ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else
+ ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
return -1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 01c8849b9db2..ba1aa994db4b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -985,10 +985,13 @@ static bool dc_construct(struct dc *dc,
goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
-#endif
- if (dc->res_pool->funcs->update_bw_bounding_box)
+ if (dc->res_pool->funcs->update_bw_bounding_box) {
+ DC_FP_START();
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ DC_FP_END();
+ }
+#endif
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
@@ -1220,6 +1223,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
}
@@ -1404,20 +1409,34 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other unblanked pipes as they have already been synced */
- for (j = j + 1; j < group_size; j++) {
- bool is_blanked;
- if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
- is_blanked =
- pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
- else
- is_blanked =
- pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
- if (!is_blanked) {
- group_size--;
- pipe_set[j] = pipe_set[group_size];
- j--;
+ /* remove any other pipes that have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check pipe's syncd to decide which pipe to be removed */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link slave pipe's syncd with master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
+ }
+ } else {
+ for (j = j + 1; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 05e216524370..61b8f29a0c30 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -202,7 +202,7 @@ void dp_wait_for_training_aux_rd_interval(
uint32_t wait_in_micro_secs)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (wait_in_micro_secs > 16000)
+ if (wait_in_micro_secs > 1000)
msleep(wait_in_micro_secs/1000);
else
udelay(wait_in_micro_secs);
@@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -6935,7 +6955,7 @@ bool dpcd_write_128b_132b_sst_payload_allocation_table(
}
}
retries++;
- udelay(5000);
+ msleep(5);
}
if (!result && retries == max_retries) {
@@ -6987,7 +7007,7 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
break;
}
- udelay(5000);
+ msleep(5);
}
if (result == ACT_FAILED) {
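
The new quirk above matches the Apple MBP 2018 panel by sink device id, device id string, and firmware revision before capping the reported link rate at RBR2. A standalone sketch of that match, reusing the constants from the hunk; the sink_caps struct is an illustrative stand-in for the driver's dpcd_caps fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the DPCD identity the driver reads. */
struct sink_caps {
	uint32_t dev_id;
	uint8_t  dev_id_str[6];
	uint8_t  fw_revision[2];
};

static int is_mbp_2018_panel(const struct sink_caps *caps)
{
	/* Values copied from the quirk in the hunk above. */
	static const uint8_t str_mbp_2018[6]        = { 101, 68, 21, 103, 98, 97 };
	static const uint8_t fwrev_mbp_2018[2]      = { 7, 4 };
	static const uint8_t fwrev_mbp_2018_vega[2] = { 8, 4 };

	return caps->dev_id == 0x0010fa &&
	       !memcmp(caps->dev_id_str, str_mbp_2018, sizeof(str_mbp_2018)) &&
	       (!memcmp(caps->fw_revision, fwrev_mbp_2018, sizeof(fwrev_mbp_2018)) ||
		!memcmp(caps->fw_revision, fwrev_mbp_2018_vega, sizeof(fwrev_mbp_2018_vega)));
}

int main(void)
{
	struct sink_caps caps = {
		.dev_id = 0x0010fa,
		.dev_id_str = { 101, 68, 21, 103, 98, 97 },
		.fw_revision = { 7, 4 },
	};

	printf("cap link rate to RBR2: %d\n", is_mbp_2018_panel(&caps));
	return 0;
}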
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d4ff6cc6b8d9..18757c158523 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1964,10 +1964,6 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool,
del_pipe->stream_res.stream_enc,
false);
- /* Release link encoder from stream in new dc_state. */
- if (dc->res_pool->funcs->link_enc_unassign)
- dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
@@ -3217,6 +3213,60 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
}
#endif
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd;
+
+ /* If pipe backend is reset, need to reset pipe syncd status */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ /* Reset all the syncd pipes from the disabled pipe */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j);
+ }
+ }
+ }
+}
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx);
+
+ /* for the disabled pipe, check if any slave pipe exists and assert */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
+
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ }
+}
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index da2c78ce14d6..b51864890621 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -202,6 +202,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ uint32_t max_otg_num;
};
struct dc_bug_wa {
@@ -344,6 +345,7 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool use_pipe_ctx_sync_logic;
};
enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 78192ecba102..eb2755bdb30e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1566,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
+ /* make sure no pipes are synced to the pipe being enabled */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic)
+ check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx);
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@@ -1604,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
- if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
- pipe_ctx->stream_res.stream_enc);
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1835,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
break;
}
}
- // We are trying to enable eDP, don't power down VDD
- if (can_apply_edp_fast_boot)
+
+ /*
+ * TO-DO: So far the code logic below only addresses the single eDP case.
+ * For dual eDP case, there are a few things that need to be
+ * implemented first:
+ *
+ * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
+ * stream[0 or 1] will all be checked.
+ *
+ * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+ * for each eDP.
+ *
+ * Once the above two things are completed, we can then change the logic below
+ * correspondingly, so dual eDP case will be fully covered.
+ */
+
+ // We are trying to enable eDP, don't power down VDD if an eDP stream exists
+ if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
keep_edp_vdd_on = true;
+ DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+ } else {
+ DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+ }
}
// Check seamless boot support
@@ -2297,6 +2316,10 @@ enum dc_status dce110_apply_ctx_to_hw(
enum dc_status status;
int i;
+ /* reset syncd pipes from disabled pipes */
+ if (dc->config.use_pipe_ctx_sync_logic)
+ reset_syncd_pipes_from_disabled_pipes(dc, context);
+
/* Reset old context */
/* look up the targets that have been removed since last commit */
hws->funcs.reset_hw_ctx_wrap(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index bf4436d7aaab..b0c08ee6bc2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-
- /* set DIG_START to 0x1 to reset FIFO */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
- udelay(100);
-
- /* write 0 to take the FIFO out of reset */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
-}
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a146a41f68e9..687d7e4bf7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2bc93df023ad..2a72517e2b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 8a70f92795c2..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 8daa12730bc1..a04ca4a98392 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 602ec9a08549..8ca26383b568 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1878,7 +1878,6 @@ noinline bool dcn30_internal_validate_bw(
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- DC_FP_START();
if (!pipe_cnt) {
out = true;
goto validate_out;
@@ -2104,7 +2103,6 @@ validate_fail:
out = false;
validate_out:
- DC_FP_END();
return out;
}
@@ -2306,7 +2304,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
if (pipe_cnt == 0)
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index c1c6e602b06c..5d9637b07429 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1380,6 +1380,17 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
+static void dcn301_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ DC_FP_START();
+ dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+}
+
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 90c73a1cb986..5e3bcaf12cac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
- if (ret_val > clamp_value)
+ if (ret_val > clamp_value) {
+ /* clamping WMs is abnormal, unexpected and may lead to underflow */
+ ASSERT(0);
ret_val = clamp_value;
+ }
return ret_val;
}
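
convert_and_clamp() turns a nanosecond watermark into refclk cycles and clamps it to the register field's maximum; the hunk adds an assertion because hitting the clamp means the watermark cannot be represented and may cause underflow. A standalone sketch of the same arithmetic (the 0x3fff field width is taken from the later hunks in this file):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Convert a watermark in ns into refclk cycles (refclk in MHz), then
 * clamp to the register field's maximum; assert if the clamp is hit,
 * since the requested watermark then cannot be expressed. */
static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
				  uint32_t clamp_value)
{
	uint32_t ret_val = wm_ns * refclk_mhz / 1000;

	if (ret_val > clamp_value) {
		assert(0 && "clamping watermark: value does not fit the field");
		ret_val = clamp_value;
	}

	return ret_val;
}

int main(void)
{
	/* e.g. 5000 ns at a 100 MHz refclk = 500 cycles, well below 0x3fff */
	printf("%u\n", convert_and_clamp(5000, 100, 0x3fff));
	return 0;
}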
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 42ed47e8133d..8d64187478e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2260,6 +2260,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 246071c72f6b..548cdef8a8ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 015e7f2c0b16..0fc9f3e3ffae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 8bc27de4c104..618f4b682ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg(
mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index aef854270054..747167083dea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 94c32832a0e7..0a7a33864973 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
index fc7065d17842..774b0fdfc80b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i,
void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index d46a2733024c..8f9f1d607f7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st {
double t_sr_wm_us;
double t_extra_us;
double mem_trip_us;
- double t_srx_delay_us;
double deepsleep_dcfclk_mhz;
double total_flip_bw;
unsigned int total_flip_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 71ea503cb32f..412e75eb4704 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -142,9 +142,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v
dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
- "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
- dlg_sys_param->t_srx_delay_us);
- dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 59dc2c5b58dd..3df559c591f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -1331,10 +1331,6 @@ void dml1_rq_dlg_get_dlg_params(
if (dual_plane)
DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
- DTRACE(
- "DLG: %s: t_srx_delay_us = %3.2f",
- __func__,
- (double) dlg_sys_param->t_srx_delay_us);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 890280026e69..943240e2809e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -382,6 +382,7 @@ struct pipe_ctx {
struct pll_settings pll_settings;
uint8_t pipe_idx;
+ uint8_t pipe_idx_syncd;
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 073f8b667eff..c88e113b94d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,10 +164,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 4249bf306e09..dbfe6690ded8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -34,6 +34,10 @@
#define MEMORY_TYPE_HBM 2
+#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
+#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F)
+#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd))
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
@@ -208,6 +212,13 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
const struct dc_link *link);
#endif
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context);
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
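
The three new macros above pack a validity flag and a master-pipe index into the single byte pipe_idx_syncd: bit 7 marks the value valid, bits 0-6 carry the index of the master pipe. A standalone sketch of that encoding; the struct and helper names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Same encoding as IS/GET/SET_PIPE_SYNCD_* above: bit 7 = valid,
 * bits 0-6 = master pipe index this pipe is synced to. */
struct pipe_like {
	uint8_t pipe_idx_syncd;
};

static int  syncd_valid(const struct pipe_like *p)  { return (p->pipe_idx_syncd & 0x80) ? 1 : 0; }
static int  syncd_get(const struct pipe_like *p)    { return p->pipe_idx_syncd & 0x7F; }
static void syncd_set(struct pipe_like *p, int idx) { p->pipe_idx_syncd = 0x80 | idx; }

int main(void)
{
	struct pipe_like pipe = { 0 };

	printf("valid before: %d\n", syncd_valid(&pipe));
	syncd_set(&pipe, 3);
	printf("valid after: %d, master idx: %d\n",
	       syncd_valid(&pipe), syncd_get(&pipe));
	return 0;
}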
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e2cae97f4ff1..48cc009d9bdf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 777f717c37ae..5488a0edb942 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -421,6 +421,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
return 0;
}
+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t *board_reserved;
+ uint16_t *freq_table_gfx;
+ uint32_t i;
+
+ /* Fix some OEM SKU specific stability issues */
+ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0XC3) &&
+ (adev->pdev->subsystem_device == 0x16C2) &&
+ (adev->pdev->subsystem_vendor == 0x1043))
+ board_reserved[0] = 1387;
+
+ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0XC3) &&
+ ((adev->pdev->subsystem_device == 0x16C2) ||
+ (adev->pdev->subsystem_device == 0x133C)) &&
+ (adev->pdev->subsystem_vendor == 0x1043)) {
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
+ if (freq_table_gfx[i] > 2500)
+ freq_table_gfx[i] = 2500;
+ }
+ }
+
+ return 0;
+}
+
static int sienna_cichlid_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -441,7 +471,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- return ret;
+ return sienna_cichlid_patch_pptable_quirk(smu);
}
static int sienna_cichlid_tables_init(struct smu_context *smu)
@@ -1238,21 +1268,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_BEIGE_GOBY:
+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -3696,14 +3742,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
+ uint16_t *mgpu_fan_boost_limit_rpm;
+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
- if (!smc_pptable->MGpuFanBoostLimitRpm)
+ if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
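
sienna_cichlid_patch_pptable_quirk() keys off the PCI device, revision, and subsystem ids to patch the powerplay table for specific OEM SKUs. The sketch below reproduces only the GFX clock cap from the hunk; the pci_id struct and the NUM_GFXCLK_DPM_LEVELS value of 16 are assumptions standing in for adev->pdev and the real pptable layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the PCI identity read from adev->pdev. */
struct pci_id {
	uint16_t device;
	uint8_t  revision;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

#define NUM_GFXCLK_DPM_LEVELS 16  /* assumed table size for this sketch */

static void patch_pptable_quirk(const struct pci_id *id,
				uint16_t freq_table_gfx[NUM_GFXCLK_DPM_LEVELS])
{
	int i;

	/* IDs and the 2500 MHz limit are taken from the hunk above. */
	if (id->device != 0x73DF || id->revision != 0xC3 ||
	    id->subsystem_vendor != 0x1043)
		return;

	if (id->subsystem_device != 0x16C2 && id->subsystem_device != 0x133C)
		return;

	for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
		if (freq_table_gfx[i] > 2500)
			freq_table_gfx[i] = 2500;
}

int main(void)
{
	struct pci_id id = { 0x73DF, 0xC3, 0x1043, 0x16C2 };
	uint16_t gfx[NUM_GFXCLK_DPM_LEVELS] = { 500, 1800, 2600, 2700 };

	patch_pptable_quirk(&id, gfx);
	printf("top levels after quirk: %u %u\n", gfx[2], gfx[3]);
	return 0;
}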
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 38cd0ece24f6..42f705c7a36f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..0bc84b709a93 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
+ int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 58a242871b28..6e3f1d600541 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,6 +6,7 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index d9eb353a4bf0..dbe1cc620f6e 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
};
static const struct ast_vbios_enhtable res_1600x900[] = {
- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 60, 1, 0x3A },
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 61db5a66b493..44ad70939663 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -8,7 +8,6 @@ config DRM_BRIDGE
config DRM_PANEL_BRIDGE
def_bool y
depends on DRM_BRIDGE
- depends on DRM_KMS_HELPER
select DRM_PANEL
help
DRM bridge wrapper of DRM panels
@@ -30,6 +29,7 @@ config DRM_CDNS_DSI
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
+ select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index a7389a0facfb..af07eeb47ca0 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
@@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
/*
* ui2bc - UI time periods to byte clock cycles
*/
-static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+static u32 ui2bc(unsigned int ui)
{
- u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-
- return DIV64_U64_ROUND_UP(ui * dsi->lanes,
- dsi->mode.clock * 1000 * bpp);
+ return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}
/*
@@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi)
}
/* values in byte clock cycles */
- cycles = ui2bc(dsi, cfg->clk_pre);
+ cycles = ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
- cycles += ui2bc(dsi, cfg->clk_pre);
+ cycles += ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
cycles = ps2bc(dsi, cfg->hs_exit);
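The ui2bc() change above works because, on a MIPI D-PHY lane, one byte-clock cycle corresponds to 8 unit intervals (one serialized byte), so the conversion reduces to a round-up division by BITS_PER_BYTE. A standalone sketch of the arithmetic, for illustration only:

#include <stdio.h>

/* One byte-clock cycle spans 8 UI, so UI -> byte-clock cycles is a
 * round-up division by 8 (BITS_PER_BYTE in the kernel sources).
 */
static unsigned int ui2bc(unsigned int ui)
{
	return (ui + 7) / 8;
}

int main(void)
{
	/* e.g. a clk_pre of 10 UI needs 2 byte-clock cycles */
	printf("ui2bc(10) = %u\n", ui2bc(10));
	return 0;
}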
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index dab8f76618f3..68d8415e6c28 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {}
static void ti_sn65dsi86_runtime_disable(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
@@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
"failed to get reference clock\n");
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+ pm_runtime_use_autosuspend(pdata->dev);
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(pdata->dev, 500);
- pm_runtime_use_autosuspend(pdata->dev);
ti_sn65dsi86_debugfs_init(pdata);
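The hunk above moves the autosuspend setup ahead of the devm teardown registration and makes the teardown undo it with pm_runtime_dont_use_autosuspend(). A minimal sketch of that probe-time ordering, assuming a generic device and a hypothetical my_runtime_disable() action (the 500 ms delay mirrors the driver's value):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void my_runtime_disable(void *data)
{
	struct device *dev = data;

	pm_runtime_dont_use_autosuspend(dev);	/* undo pm_runtime_use_autosuspend() */
	pm_runtime_disable(dev);		/* undo pm_runtime_enable() */
}

static int my_probe_runtime_pm(struct device *dev)
{
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);
	pm_runtime_use_autosuspend(dev);

	/* Register the cleanup only once everything it must undo is in place. */
	return devm_add_action_or_reset(dev, my_runtime_disable, dev);
}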
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 21174efd91be..88cd992df356 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1327,8 +1327,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
drm_dbg_atomic(dev, "checking %p\n", state);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- requested_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ requested_crtc |= drm_crtc_mask(crtc);
+ }
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
@@ -1377,8 +1379,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- affected_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ affected_crtc |= drm_crtc_mask(crtc);
+ }
/*
* For commits that allow modesets drivers can add other CRTCs to the
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 9781722519c3..54d62fdb4ef9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
+ struct drm_property_blob *blob;
+
drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
+ blob = drm_property_create_blob(crtc->dev,
+ sizeof(umode), &umode);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
+
+ state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index f19d9acbe959..50b8a088f763 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -27,11 +27,11 @@
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-
#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/cc_platform.h>
+#include <linux/ioport.h>
#include <xen/xen.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index a50c82bc2b2f..76a8c707c34b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable)
{
+ if (!connector->vrr_capable_property)
+ return;
+
drm_object_property_set_value(&connector->base,
connector->vrr_capable_property,
capable);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 12893e7be89b..f5f5de362ff2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return quirks;
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, edid);
/*
@@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cefd0cbf9deb..dc275c466c9c 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_DONTEXPAND;
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 042bb80383c9..b910978d3e48 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -275,6 +281,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* OneXPlayer */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index beaf99e9120a..b688841c18e4 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
*
* The notifier is called with no locks held. The new hw_state and sw_state
* can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
* argument of the notifier_block's notifier_call.
*
* The notifier will NOT be called when changes are made through
diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c
index a2cafb294ca6..e7aa74ad0b24 100644
--- a/drivers/gpu/drm/drm_privacy_screen_x86.c
+++ b/drivers/gpu/drm/drm_privacy_screen_x86.c
@@ -33,6 +33,9 @@ static bool __init detect_thinkpad_privacy_screen(void)
unsigned long long output;
acpi_status status;
+ if (acpi_disabled)
+ return false;
+
/* Get embedded-controller handle */
status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle);
if (ACPI_FAILURE(status) || !ec_handle)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b03c20c14ca1..a17313282e8b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
+ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
DRM_ERROR("submit arguments out of size limits\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 12571ac45540..c04264f70ad1 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct decon_context *ctx;
struct device_node *i80_if_timings;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev)
goto err_iounmap;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- ctx->i80_if ? "lcd_sys" : "vsync");
- if (!res) {
- dev_err(dev, "irq request failed.\n");
- ret = -ENXIO;
+ ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+ if (ret < 0)
goto err_iounmap;
- }
- ret = devm_request_irq(dev, res->start, decon_irq_handler,
- 0, "drm_decon", ctx);
+ ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
goto err_iounmap;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 32a36572b894..d13f5e3a030d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
int ret;
int te_gpio_irq;
- dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
- if (IS_ERR(dsi->te_gpio)) {
+ dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
+ if (!dsi->te_gpio) {
+ return 0;
+ } else if (IS_ERR(dsi->te_gpio)) {
dev_err(dsi->dev, "gpio request failed with %ld\n",
PTR_ERR(dsi->te_gpio));
return PTR_ERR(dsi->te_gpio);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 023f54ee61a8..0ee32e4b1e43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev)
struct exynos_drm_ipp_formats *formats;
struct device *dev = &pdev->dev;
struct fimc_context *ctx;
- struct resource *res;
int ret;
int i, j, num_limits, num_formats;
@@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev)
return PTR_ERR(ctx->regs);
/* resource irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to request irq resource.\n");
- return -ENOENT;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, fimc_irq_handler,
- 0, dev_name(dev), ctx);
+ ret = devm_request_irq(dev, ret, fimc_irq_handler,
+ 0, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index c735e53939d8..7d5a483a54de 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct device_node *i80_if_timings;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev)
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- ctx->i80_if ? "lcd_sys" : "vsync");
- if (!res) {
- dev_err(dev, "irq request failed.\n");
- return -ENXIO;
- }
+ ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, fimd_irq_handler,
- 0, "drm_fimd", ctx);
+ ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 166a80262896..964dceb28c1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev)
struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
- struct resource *res;
int num_formats, ret, i, j;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev)
return PTR_ERR(ctx->regs);
/* resource irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to request irq resource.\n");
- return -ENOENT;
- }
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0)
+ return ctx->irq;
- ctx->irq = res->start;
ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
dev_name(dev), ctx);
if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 41c54f1f60bc..e5204be86093 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "get interrupt resource failed.\n");
- return -ENXIO;
- }
+ ret = platform_get_irq(mixer_ctx->pdev, 0);
+ if (ret < 0)
+ return ret;
+ mixer_ctx->irq = ret;
- ret = devm_request_irq(dev, res->start, mixer_irq_handler,
- 0, "drm_mixer", mixer_ctx);
+ ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler,
+ 0, "drm_mixer", mixer_ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
return ret;
}
- mixer_ctx->irq = res->start;
return 0;
}
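The Exynos hunks above all follow the same conversion: platform_get_irq()/platform_get_irq_byname() replaces the platform_get_resource(IORESOURCE_IRQ, ...) lookup and returns the Linux IRQ number (or a negative errno) directly. A minimal sketch of the resulting probe pattern, with a hypothetical handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_request_irq(struct platform_device *pdev)
{
	int irq, ret;

	/* A negative return is already an errno; no extra dev_err() needed. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		dev_err(&pdev->dev, "irq request failed\n");

	return ret;
}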
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a4c94dc2e216..cfd932514da2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on X86
depends on 64BIT
default n
help
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 2da4aacc956b..8ac196e814d5 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -825,6 +825,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int max_bw_point = 0, max_bw = 0;
unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ bool changed = false;
u32 mask = 0;
/* FIXME earlier gens need some checks too */
@@ -868,6 +869,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state->data_rate[crtc->pipe] = new_data_rate;
new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+ changed = true;
+
drm_dbg_kms(&dev_priv->drm,
"pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -875,7 +878,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!new_bw_state)
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (new_bw_state &&
+ intel_can_enable_sagv(dev_priv, old_bw_state) !=
+ intel_can_enable_sagv(dev_priv, new_bw_state))
+ changed = true;
+
+ /*
+ * If none of our inputs (data rates, number of active
+	 * planes, SAGV yes/no) changed, there is nothing to do here.
+ */
+ if (!changed)
return 0;
ret = intel_atomic_lock_global_state(&new_bw_state->base);
@@ -961,7 +976,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
*/
new_bw_state->qgv_points_mask = ~allowed_points & mask;
- old_bw_state = intel_atomic_get_old_bw_state(state);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 46c6eecbd917..0ceaed1c9656 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -30,19 +30,19 @@ struct intel_bw_state {
*/
u8 pipe_sagv_reject;
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
/*
* Current QGV points mask, which restricts
* some particular SAGV states, not to confuse
* with pipe_sagv_mask.
*/
- u8 qgv_points_mask;
+ u16 qgv_points_mask;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
- /* bitmask of active pipes */
- u8 active_pipes;
-
int min_cdclk;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index bf7ce684dd8e..bb4a85445fc6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
+ skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index c1439fcb5a95..3ff149df4a77 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
struct drm_display_mode *fixed_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = connector->encoder;
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
return NULL;
}
+ if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+ encoder->port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS only supported on eDP port A\n");
+ return NULL;
+ }
+
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 160fd2bdafe5..957feeccff3f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (DISPLAY_VER(i915) >= 11 &&
- (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ (plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0065111593a6..4a2662838cd8 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
port++;
}
+ /*
+	 * The port numbering and mapping here are bizarre. The now-obsolete
+ * swsci spec supports ports numbered [0..4]. Port E is handled as a
+ * special case, but port F and beyond are not. The functionality is
+ * supposed to be obsolete for new platforms. Just bail out if the port
+ * number is out of bounds after mapping.
+ */
+ if (port > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+ intel_encoder->base.base.id, intel_encoder->base.name,
+ port_name(intel_encoder->port), port);
+ return -EINVAL;
+ }
+
if (!enable)
parm |= 4 << 8;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 1a376e9a1ff3..d610e48cab94 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
+ if (rec->dst_height == 0 || rec->dst_width == 0)
+ return -EINVAL;
+
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a1a663f362e7..00279e8c2775 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1406,6 +1406,13 @@ static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
+static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
+ PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1510,7 +1517,13 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = PSR2_MAN_TRK_CTL_ENABLE;
+ u32 val = 0;
+
+ if (!IS_ALDERLAKE_P(dev_priv))
+ val = PSR2_MAN_TRK_CTL_ENABLE;
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
if (full_update) {
/*
@@ -1530,7 +1543,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
- val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
}
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 09f405e4d363..92ff654f54f5 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
- phy);
+ phy_name(phy));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 40faa18947c9..7784c30fe893 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
@@ -690,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
@@ -701,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- if (active_links) {
- enum intel_display_power_domain domain;
- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
@@ -715,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
&dig_port->tc_lock_power_domain);
-
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ } else {
+ /*
+		 * TBT-alt is the default mode whenever PHY ownership is not
+		 * held (regardless of the sink's live connection state), so
+		 * we'll just switch from it to disconnected mode here
+		 * without logging a note.
+ */
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ icl_tc_phy_disconnect(dig_port);
}
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3a5b247be738..1736efa43339 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
timeout) < 0) {
i915_request_put(rq);
- tl = intel_context_timeline_lock(ce);
+ /*
+	 * Error path: we cannot use intel_context_timeline_lock() here, as
+	 * it is user-interruptible and this cleanup step must always be
+	 * done.
+ */
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
+ mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 4b4829eb16c2..0dd107dcecc2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -311,6 +311,7 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(6)
#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(8)
+#define I915_BO_WAS_BOUND_BIT 9
/**
* @mem_flags - Mutable placement-related flags
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 9f429ed6e78a..a50f884973bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -10,6 +10,8 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "gt/intel_gt.h"
+
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -221,6 +223,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
+ intel_gt_invalidate_tlbs(to_gt(i915));
+ }
+
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index de3fe79b665a..1f880c8c66e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index ee9612a3ee5e..e130c820ae4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
if (!IS_ERR(fence))
goto out;
- } else if (move_deps) {
- int err = i915_deps_sync(move_deps, ctx);
+ } else {
+ int err = PTR_ERR(fence);
+
+ if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+ return fence;
- if (err)
- return ERR_PTR(err);
+ if (move_deps) {
+ err = i915_deps_sync(move_deps, ctx);
+ if (err)
+ return ERR_PTR(err);
+ }
}
/* Error intercept failed or no accelerated migration to start with */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f98f0fb21efb..35d0fcd3a86c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -29,6 +29,8 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
spin_lock_init(&gt->irq_lock);
+ mutex_init(&gt->tlb_invalidate_lock);
+
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -912,3 +914,109 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
+
+struct reg_and_bit {
+ i915_reg_t reg;
+ u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+ const i915_reg_t *regs, const unsigned int num)
+{
+ const unsigned int class = engine->class;
+ struct reg_and_bit rb = { };
+
+ if (drm_WARN_ON_ONCE(&engine->i915->drm,
+ class >= num || !regs[class].reg))
+ return rb;
+
+ rb.reg = regs[class];
+ if (gen8 && class == VIDEO_DECODE_CLASS)
+ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+ else
+ rb.bit = engine->instance;
+
+ rb.bit = BIT(rb.bit);
+
+ return rb;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+ static const i915_reg_t gen8_regs[] = {
+ [RENDER_CLASS] = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
+ [COPY_ENGINE_CLASS] = GEN8_BTCR,
+ };
+ static const i915_reg_t gen12_regs[] = {
+ [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const i915_reg_t *regs;
+ unsigned int num = 0;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (GRAPHICS_VER(i915) == 12) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return;
+ }
+
+ if (drm_WARN_ONCE(&i915->drm, !num,
+ "Platform does not implement TLB invalidation!"))
+ return;
+
+ GEM_TRACE("\n");
+
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ mutex_lock(&gt->tlb_invalidate_lock);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ /*
+		 * HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+ const unsigned int timeout_us = 100;
+ const unsigned int timeout_ms = 4;
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ if (__intel_wait_for_register_fw(uncore,
+ rb.reg, rb.bit, 0,
+ timeout_us, timeout_ms,
+ NULL))
+ drm_err_ratelimited(&gt->i915->drm,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, timeout_ms);
+ }
+
+ /*
+ * Use delayed put since a) we mostly expect a flurry of TLB
+ * invalidations so it is good to avoid paying the forcewake cost and
+ * b) it works around a bug in Icelake which cannot cope with too rapid
+ * transitions.
+ */
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+ mutex_unlock(&gt->tlb_invalidate_lock);
+}
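Each engine iteration above is a "kick, then poll until the hardware clears the bit" sequence with a bounded wait. A condensed sketch of that pattern using the generic readl_poll_timeout() helper (the register layout is hypothetical; the i915 code uses its own __intel_wait_for_register_fw() instead):

#include <linux/io.h>
#include <linux/iopoll.h>

static int invalidate_and_wait(void __iomem *reg, u32 bit)
{
	u32 val;

	writel(bit, reg);	/* request the invalidation */

	/* Poll every 10 us until the HW clears the bit, give up after 4 ms. */
	return readl_poll_timeout(reg, val, !(val & bit), 10, 4000);
}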
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 3ace129eb2af..a913fb6ffec3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -91,4 +91,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 14216cc471b1..f20687796490 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -73,6 +73,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct mutex tlb_invalidate_lock;
+
struct i915_wa_list wa_list;
struct intel_gt_timelines {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index f9240d4baa69..3aabe164c329 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -206,6 +206,11 @@ struct intel_guc {
* context usage for overflows.
*/
struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 13b27b8ff74e..ba21ace973da 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2),
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
id,
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e7517206af82..154ad726e266 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
if (new_start == lower_32_bits(*prev_start))
return;
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched in contexts will have a start time that is
+	 * is unparked, all switched-in contexts will have a start time that is
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted, however if one of them has
+	 * gt_stamp_hi does not need to be adjusted; however, if one of them has
+ *
+ * The below conditions address the cases of new_start rollover and
+ * gt_stamp_last rollover respectively.
+ */
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
@@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
-static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. Sometimes the
+ * inconsistency is in reading the upper MSB bytes of the last_in value when
+ * this race occurs. Two kinds of corruption are seen: the upper 8 bits are
+ * zero, or the upper 24 bits are zero. Since the corrupted values are still
+ * non-zero, their validity cannot be determined trivially. Instead we read
+ * the values multiple times until they are consistent. In test runs, 3
+ * attempts result in consistent values. The upper bound is set to 6 attempts
+ * and may need to be tuned for any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ int i = 0;
+
+ do {
+ *last_in = READ_ONCE(rec->last_switch_in_stamp);
+ *id = READ_ONCE(rec->current_context_index);
+ *total = READ_ONCE(rec->total_runtime);
+
+ if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
+ READ_ONCE(rec->current_context_index) == *id &&
+ READ_ONCE(rec->total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 last_switch = rec->last_switch_in_stamp;
- u32 ctx_id = rec->current_context_index;
- u32 total = rec->total_runtime;
+ u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
@@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
}
}
-static void guc_update_pm_timestamp(struct intel_guc *guc,
- struct intel_engine_cs *engine,
- ktime_t *now)
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static u64 gpm_timestamp(struct intel_gt *gt)
+{
+ u32 lo, hi, old_hi, loop = 0;
+
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ do {
+ lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
+ old_hi = hi;
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ } while (old_hi != hi && loop++ < 2);
+
+ return ((u64)hi << 32) | lo;
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
- u32 gt_stamp_now, gt_stamp_hi;
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gt_stamp_now = intel_uncore_read(engine->uncore,
- RING_TIMESTAMP(engine->mmio_base));
+ gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
- if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
- guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
@@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
guc_update_engine_gt_clks(engine);
- guc_update_pm_timestamp(guc, engine, now);
+ guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
@@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
@@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
- for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
- }
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
@@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
if (!guc_submission_initialized(guc))
return;
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
@@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
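__get_engine_usage_record() above copes with a writer (the GuC) it cannot lock against by re-reading the record until two consecutive snapshots agree, bounded at six attempts. A standalone plain-C sketch of that retry-until-consistent read (struct layout and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct usage_record {
	_Atomic uint32_t last_switch_in;
	_Atomic uint32_t context_id;
	_Atomic uint32_t total_runtime;
};

static void read_usage_record(const struct usage_record *rec,
			      uint32_t *in, uint32_t *id, uint32_t *total)
{
	int i = 0;

	do {
		*in = atomic_load(&rec->last_switch_in);
		*id = atomic_load(&rec->context_id);
		*total = atomic_load(&rec->total_runtime);

		/* Keep the snapshot only if a second read sees the same values. */
		if (atomic_load(&rec->last_switch_in) == *in &&
		    atomic_load(&rec->context_id) == *id &&
		    atomic_load(&rec->total_runtime) == *total)
			break;
	} while (++i < 6);	/* bound mirrors the patch's six attempts */
}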
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 99d1781fa5f0..af79b39048f7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/**
+/*
* Check if can do 2M page
* @vgpu: target vgpu
* @entry: target pfn's gtt entry
@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5ae812d60abe..0633888a411e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 76f1d53bdf34..3ad22bbe80eb 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -6,6 +6,7 @@
#ifndef __I915_MM_H__
#define __I915_MM_H__
+#include <linux/bug.h>
#include <linux/types.h>
struct vm_area_struct;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 971d601fe751..902e4c802a12 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+#define MISC_STATUS0 _MMIO(0xA500)
+#define MISC_STATUS1 _MMIO(0xA504)
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
@@ -2721,6 +2722,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -2819,6 +2826,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
#define FPGA_DBG _MMIO(0x42300)
@@ -4817,6 +4829,7 @@ enum {
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0)
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31)
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 29a858c53bdd..c0d6d5526abe 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -457,6 +457,9 @@ int i915_vma_bind(struct i915_vma *vma,
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
+ if (vma->obj)
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
atomic_or(bind_flags, &vma->flags);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index da8f82c2342f..fc8a68f3a2ed 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
@@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_GEN9_BC(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 6bff77521094..4ba0f1967cca 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -50,11 +50,11 @@ enum intel_pch {
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 434b1f8b7fe3..fae4f7818d28 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4029,6 +4029,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
+ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+ intel_can_enable_sagv(dev_priv, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
@@ -4044,17 +4055,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
intel_can_enable_sagv(dev_priv, new_bw_state);
}
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
};
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
@@ -4732,6 +4736,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
.join_mbus = true,
},
{
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
@@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].join_mbus;
}
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
}
return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
 * returns the corresponding DBuf slice mask as stated in BSpec for a
 * particular platform.
*/
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
}
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
}
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
}
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes);
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes);
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice yet.
* Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (IS_ALDERLAKE_P(dev_priv))
- new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
enum pipe pipe = crtc->pipe;
unsigned int mbus_offset;
enum plane_id plane_id;
+ u8 slices;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
- dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
/*
* Used for checking overlaps, so we need absolute
* offsets instead of MBUS relative offsets.
*/
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(dev_priv, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
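The skl_wm_sanitize() addition above hinges on spotting overlapping DDB allocations left behind by the BIOS before the first atomic commit runs (skl_ddb_allocation_overlaps() is the helper the driver uses). As a hedged, standalone illustration of the underlying check, not the i915 code itself: two half-open [start, end) ranges collide exactly when each one starts before the other ends.

    #include <stdbool.h>
    #include <stdint.h>

    struct ddb_range {
            uint16_t start;
            uint16_t end;   /* exclusive */
    };

    /* Two half-open [start, end) allocations collide exactly when each one
     * starts before the other ends; empty ranges never overlap anything. */
    static bool ranges_overlap(const struct ddb_range *a, const struct ddb_range *b)
    {
            return a->start < b->end && b->start < a->end;
    }

If any pipe's allocation overlaps another's, the sanitize path gives up on the BIOS layout and disables the planes instead of trying to transition through an impossible intermediate state.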
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 990cdcaf85ce..d2243653a893 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53f1ccb78849..64c2708efc9e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
-
- if (rpm->available)
- stack_depot_init();
+ stack_depot_init();
}
static noinline depot_stack_handle_t
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index fc25ebf1a593..778da3179b3c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -724,7 +724,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
}
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
- enum forcewake_domains fw_domains)
+ enum forcewake_domains fw_domains,
+ bool delayed)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
@@ -739,7 +740,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- fw_domains_put(uncore, domain->mask);
+ if (delayed &&
+ !(domain->uncore->fw_domains_timer & domain->mask))
+ fw_domain_arm_timer(domain);
+ else
+ fw_domains_put(uncore, domain->mask);
}
}
@@ -760,7 +765,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
return;
spin_lock_irqsave(&uncore->lock, irqflags);
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ unsigned long irqflags;
+
+ if (!uncore->fw_get_funcs)
+ return;
+
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains, true);
spin_unlock_irqrestore(&uncore->lock, irqflags);
}
@@ -802,7 +820,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
if (!uncore->fw_get_funcs)
return;
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
}
void assert_forcewakes_inactive(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 210fe2a71612..2a15b2b2e2fc 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -246,6 +246,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
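intel_uncore_forcewake_put_delayed() keeps the hardware awake a little longer by arming the per-domain timer instead of dropping forcewake immediately. A minimal sketch of that defer-the-release idea, with hypothetical names rather than the i915 timer machinery:

    #include <stdbool.h>

    struct domain {
            int wake_count;
            bool timer_armed;        /* stands in for the per-domain timer */
    };

    /* Timer callback: perform the release that was postponed. */
    static void domain_timer_expired(struct domain *d)
    {
            d->timer_armed = false;
            d->wake_count--;
    }

    static void domain_put(struct domain *d, bool delayed)
    {
            if (delayed && !d->timer_armed) {
                    d->timer_armed = true;   /* drop the reference later */
                    return;
            }
            d->wake_count--;                 /* drop it immediately */
    }

The non-delayed callers keep the old behaviour by passing false, which is why every existing __intel_uncore_forcewake_put() call site gains a third argument in the hunks above.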
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 7374f1952762..5c2b2277afbf 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,6 +2,7 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index a8aba0141ce7..06cb1a59b9bc 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -217,14 +217,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
if (!imx_pd_format_supported(bus_fmt))
return -EINVAL;
- if (bus_flags &
- ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
- dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
- return -EINVAL;
- }
-
bridge_state->output_bus_cfg.flags = bus_flags;
bridge_state->input_bus_cfg.flags = bus_flags;
imx_crtc_state->bus_flags = bus_flags;
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 00404ba4126d..2735b8eb3537 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
- case LAYER_2:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
- break;
- case LAYER_3:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
- break;
}
kmb->plane_status[plane_id].disable = true;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5d90d2eb0019..bced4c7d668e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
mtk_dsi_poweroff(dsi);
}
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto err_cleanup_encoder;
+
+ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+ if (IS_ERR(dsi->connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ ret = PTR_ERR(dsi->connector);
+ goto err_cleanup_encoder;
+ }
+ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
+
+ return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+ .bind = mtk_dsi_bind,
+ .unbind = mtk_dsi_unbind,
+};
+
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
+ struct device *dev = host->dev;
+ int ret;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->next_bridge))
+ return PTR_ERR(dsi->next_bridge);
+
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(host->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
return 0;
}
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ component_del(host->dev, &mtk_dsi_component_ops);
+ drm_bridge_remove(&dsi->bridge);
+ return 0;
+}
+
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
int ret;
@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
+ .detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_simple_encoder_init(drm, &dsi->encoder,
- DRM_MODE_ENCODER_DSI);
- if (ret) {
- DRM_ERROR("Failed to encoder init to drm\n");
- return ret;
- }
-
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
- ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret)
- goto err_cleanup_encoder;
-
- dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
- if (IS_ERR(dsi->connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- ret = PTR_ERR(dsi->connector);
- goto err_cleanup_encoder;
- }
- drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
- return 0;
-
-err_cleanup_encoder:
- drm_encoder_cleanup(&dsi->encoder);
- return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
- int ret;
- struct drm_device *drm = data;
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- return ret;
-
- return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
- .bind = mtk_dsi_bind,
- .unbind = mtk_dsi_unbind,
-};
-
static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct drm_panel *panel;
struct resource *regs;
int irq_num;
int ret;
@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->next_bridge);
- if (ret)
- goto err_unregister_host;
-
- if (panel) {
- dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(dsi->next_bridge)) {
- ret = PTR_ERR(dsi->next_bridge);
- goto err_unregister_host;
- }
- }
-
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- drm_bridge_add(&dsi->bridge);
-
- ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to add component: %d\n", ret);
- goto err_unregister_host;
- }
-
return 0;
err_unregister_host:
@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
- drm_bridge_remove(&dsi->bridge);
- component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);
return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index e9ae22b4f813..52be08b744ad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -404,9 +404,9 @@ mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pl
udelay(50);
/* program pixel pll register */
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
udelay(50);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 51b83776951b..17cfad6424db 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1560,6 +1560,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
+ gpu->suspend_count++;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 93005839b5da..fb261930ad1c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -608,9 +608,27 @@ static int adreno_resume(struct device *dev)
return gpu->funcs->pm_resume(gpu);
}
+static int active_submits(struct msm_gpu *gpu)
+{
+ int active_submits;
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+ mutex_unlock(&gpu->active_lock);
+ return active_submits;
+}
+
static int adreno_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining;
+
+ remaining = wait_event_timeout(gpu->retire_event,
+ active_submits(gpu) == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+ return -EBUSY;
+ }
return gpu->funcs->pm_suspend(gpu);
}
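adreno_suspend() now refuses to power the GPU down while submissions are still in flight: it waits up to one second for gpu->active_submits to drain, and retire_submits() wakes the new retire_event waitqueue when work completes. A hedged, standalone sketch of that drain-then-suspend shape (polling here for simplicity, where the kernel uses wait_event_timeout()/wake_up_all()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    /* Wait up to timeout_ms for the in-flight submission count to drain to
     * zero; the caller refuses to suspend (returns -EBUSY) if it never does. */
    static bool drain_before_suspend(atomic_int *active_submits, int timeout_ms)
    {
            struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
            int waited;

            for (waited = 0; waited < timeout_ms; waited++) {
                    if (atomic_load(active_submits) == 0)
                            return true;
                    nanosleep(&one_ms, NULL);
            }
            return atomic_load(active_submits) == 0;
    }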
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index a98e964c3b6f..355894a3b48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
struct dpu_hw_pcc_cfg *cfg)
{
- u32 base = ctx->cap->sblk->pcc.base;
+ u32 base;
- if (!ctx || !base) {
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ base = ctx->cap->sblk->pcc.base;
+
+ if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 052548883d27..0fe02529b5e7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
- if (!phy_pdev || !msm_dsi->phy) {
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!msm_dsi->phy) {
+ put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
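The dsi_get_phy() hunk splits a combined !phy_pdev || !msm_dsi->phy test because the two failures need different cleanup: once the platform device lookup has succeeded, a reference is held on it and must be dropped (put_device()) before deferring the probe. A hedged sketch of that release-what-you-acquired ordering, with hypothetical helpers rather than the msm API:

    struct dev { int refs; };

    static struct dev phy_dev;

    static struct dev *acquire_phy_dev(void)   /* stands in for the device lookup */
    {
            phy_dev.refs++;
            return &phy_dev;
    }

    static void release_dev(struct dev *d)     /* stands in for put_device() */
    {
            d->refs--;
    }

    /* Each early return releases exactly what this function has already
     * acquired, so deferring the probe never leaks a device reference. */
    static int bind_phy(struct dev **out, int phy_driver_ready)
    {
            struct dev *pdev = acquire_phy_dev();

            if (!pdev)
                    return -1;              /* nothing held yet */

            if (!phy_driver_ready) {
                    release_dev(pdev);      /* drop the ref before deferring */
                    return -1;
            }

            *out = pdev;
            return 0;
    }

The hdmi.c hunk further down makes the same split for msm_hdmi_get_phy().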
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c2ed177717c7..2027b38617ab 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -808,12 +808,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req,
struct msm_dsi_phy_shared_timings *shared_timings)
{
- struct device *dev = &phy->pdev->dev;
+ struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
+ dev = &phy->pdev->dev;
+
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 3acdeae25caf..719720709e9e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
- if (!phy_pdev || !hdmi->phy) {
+ if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
+ if (!hdmi->phy) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ put_device(&phy_pdev->dev);
+ return -EPROBE_DEFER;
+ }
hdmi->phy_dev = get_device(&phy_pdev->dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ad35a5d94053..555666e3f960 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -461,7 +461,7 @@ static int msm_init_vram(struct drm_device *dev)
of_node_put(node);
if (ret)
return ret;
- size = r.end - r.start;
+ size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
@@ -510,7 +510,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
struct msm_kms *kms;
- struct msm_mdss *mdss;
int ret, i;
ddev = drm_dev_alloc(drv, dev);
@@ -521,8 +520,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- mdss = priv->mdss;
-
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0f78c2615272..2c1049c0ea14 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -703,6 +703,8 @@ static void retire_submits(struct msm_gpu *gpu)
}
}
}
+
+ wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
@@ -848,6 +850,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
+ init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 445c6bfd4b6b..92aa1e9196c6 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -230,6 +230,9 @@ struct msm_gpu {
/* work for handling GPU recovery: */
struct kthread_work recover_work;
+ /** retire_event: notified when submits are retired: */
+ wait_queue_head_t retire_event;
+
/* work for handling active-list retiring: */
struct kthread_work retire_work;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 62405e980925..9bf319be11f6 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -133,6 +133,18 @@ void msm_devfreq_init(struct msm_gpu *gpu)
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
+static void cancel_idle_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->idle_work.timer);
+ kthread_cancel_work_sync(&df->idle_work.work);
+}
+
+static void cancel_boost_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->boost_work.timer);
+ kthread_cancel_work_sync(&df->boost_work.work);
+}
+
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
@@ -152,7 +164,12 @@ void msm_devfreq_resume(struct msm_gpu *gpu)
void msm_devfreq_suspend(struct msm_gpu *gpu)
{
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ devfreq_suspend_device(df->devfreq);
+
+ cancel_idle_work(df);
+ cancel_boost_work(df);
}
static void msm_devfreq_boost_work(struct kthread_work *work)
@@ -196,7 +213,7 @@ void msm_devfreq_active(struct msm_gpu *gpu)
/*
* Cancel any pending transition to idle frequency:
*/
- hrtimer_cancel(&df->idle_work.timer);
+ cancel_idle_work(df);
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 0655582ae8ed..4cfb6c001679 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -361,7 +361,11 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
bridge_state =
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
- bus_format = bridge_state->input_bus_cfg.format;
+ if (!bridge_state)
+ bus_format = MEDIA_BUS_FMT_FIXED;
+ else
+ bus_format = bridge_state->input_bus_cfg.format;
+
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 3828aafd3ac4..7ba66ad68a8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -39,6 +39,8 @@
#include <linux/sched/mm.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
+#include <linux/migrate.h>
/*
* FIXME: this is ugly right now we are using TTM to allocate vram and we pin
@@ -324,7 +326,6 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- get_page(page);
lock_page(page);
return page;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 266809e511e2..090b9b47708c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -35,6 +35,7 @@
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
#include <linux/rmap.h>
struct nouveau_svm {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index d0f52d59fc2f..64e423dddd9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
*addr += bios->imaged_addr;
}
- if (unlikely(*addr + size >= bios->size)) {
+ if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
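The nvbios_addr() change is an off-by-one fix: reading size bytes starting at *addr touches bytes *addr through *addr + size - 1, so the access is still in bounds when *addr + size == bios->size, which the old `>=` comparison wrongly rejected. A small standalone sketch of the corrected predicate:

    #include <stdbool.h>
    #include <stddef.h>

    /* An access of `len` bytes at offset `off` in a buffer of `total` bytes
     * touches bytes off .. off + len - 1, so it is valid iff off + len <= total.
     * Written to avoid overflow in off + len. */
    static bool access_in_bounds(size_t off, size_t len, size_t total)
    {
            return len <= total && off <= total - len;
    }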
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 434c2861bb40..9989a316fe88 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -106,6 +106,8 @@ config DRM_PANEL_EDP
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
+ select DRM_DP_HELPER
+ select DRM_KMS_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index f043b484055b..ed626fdc08e8 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -293,15 +293,13 @@ static int y030xx067a_probe(struct spi_device *spi)
return 0;
}
-static int y030xx067a_remove(struct spi_device *spi)
+static void y030xx067a_remove(struct spi_device *spi)
{
struct y030xx067a *priv = spi_get_drvdata(spi);
drm_panel_remove(&priv->panel);
drm_panel_disable(&priv->panel);
drm_panel_unprepare(&priv->panel);
-
- return 0;
}
static const struct drm_display_mode y030xx067a_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 8e84df9a0033..3dfafa585127 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -896,14 +896,12 @@ static int ili9322_probe(struct spi_device *spi)
return 0;
}
-static int ili9322_remove(struct spi_device *spi)
+static void ili9322_remove(struct spi_device *spi)
{
struct ili9322 *ili = spi_get_drvdata(spi);
ili9322_power_off(ili);
drm_panel_remove(&ili->panel);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 2c3378a259b1..a07ef26234e5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -728,7 +728,7 @@ static int ili9341_probe(struct spi_device *spi)
return -1;
}
-static int ili9341_remove(struct spi_device *spi)
+static void ili9341_remove(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct ili9341 *ili = spi_get_drvdata(spi);
@@ -741,7 +741,6 @@ static int ili9341_remove(struct spi_device *spi)
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
- return 0;
}
static void ili9341_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index c558de3f99be..e3b1daa0cb72 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -219,15 +219,13 @@ static int ej030na_probe(struct spi_device *spi)
return 0;
}
-static int ej030na_remove(struct spi_device *spi)
+static void ej030na_remove(struct spi_device *spi)
{
struct ej030na *priv = spi_get_drvdata(spi);
drm_panel_remove(&priv->panel);
drm_panel_disable(&priv->panel);
drm_panel_unprepare(&priv->panel);
-
- return 0;
}
static const struct drm_display_mode ej030na_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index f3183b68704f..9d0d4faa3f58 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -203,14 +203,12 @@ static int lb035q02_probe(struct spi_device *spi)
return 0;
}
-static int lb035q02_remove(struct spi_device *spi)
+static void lb035q02_remove(struct spi_device *spi)
{
struct lb035q02_device *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
-
- return 0;
}
static const struct of_device_id lb035q02_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 8e5160af1de5..cf246d15b7b6 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -266,14 +266,12 @@ static int lg4573_probe(struct spi_device *spi)
return 0;
}
-static int lg4573_remove(struct spi_device *spi)
+static void lg4573_remove(struct spi_device *spi)
{
struct lg4573 *ctx = spi_get_drvdata(spi);
lg4573_display_off(ctx);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id lg4573_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 6e5ab1debc8b..81c5c541a351 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -212,15 +212,13 @@ static int nl8048_probe(struct spi_device *spi)
return 0;
}
-static int nl8048_remove(struct spi_device *spi)
+static void nl8048_remove(struct spi_device *spi)
{
struct nl8048_panel *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id nl8048_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index d036853db865..f58cfb10b58a 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,7 +292,7 @@ static int nt39016_probe(struct spi_device *spi)
return 0;
}
-static int nt39016_remove(struct spi_device *spi)
+static void nt39016_remove(struct spi_device *spi)
{
struct nt39016 *panel = spi_get_drvdata(spi);
@@ -300,8 +300,6 @@ static int nt39016_remove(struct spi_device *spi)
nt39016_disable(&panel->drm_panel);
nt39016_unprepare(&panel->drm_panel);
-
- return 0;
}
static const struct drm_display_mode kd035g6_display_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index ead479719f00..04640c5256a8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -314,12 +314,11 @@ static int db7430_probe(struct spi_device *spi)
return 0;
}
-static int db7430_remove(struct spi_device *spi)
+static void db7430_remove(struct spi_device *spi)
{
struct db7430 *db = spi_get_drvdata(spi);
drm_panel_remove(&db->panel);
- return 0;
}
/*
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index c4b388850a13..01eb211f32f7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -358,14 +358,12 @@ static int ld9040_probe(struct spi_device *spi)
return 0;
}
-static int ld9040_remove(struct spi_device *spi)
+static void ld9040_remove(struct spi_device *spi)
{
struct ld9040 *ctx = spi_get_drvdata(spi);
ld9040_power_off(ctx);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ld9040_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 1696ceb36aa0..2adb223a895c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -291,12 +291,11 @@ static int s6d27a1_probe(struct spi_device *spi)
return 0;
}
-static int s6d27a1_remove(struct spi_device *spi)
+static void s6d27a1_remove(struct spi_device *spi)
{
struct s6d27a1 *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id s6d27a1_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
index c178d962b0d5..d99afcc672ca 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
@@ -62,10 +62,9 @@ static int s6e63m0_spi_probe(struct spi_device *spi)
s6e63m0_spi_dcs_write, false);
}
-static int s6e63m0_spi_remove(struct spi_device *spi)
+static void s6e63m0_spi_remove(struct spi_device *spi)
{
s6e63m0_remove(&spi->dev);
- return 0;
}
static const struct of_device_id s6e63m0_spi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e46db5e359c..b42c1d816e79 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
err = panel_dpi_probe(dev, panel);
if (err)
goto free_ddc;
+ desc = panel->desc;
} else {
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
@@ -2016,7 +2017,7 @@ static const struct display_timing innolux_g070y2_l01_timing = {
static const struct panel_desc innolux_g070y2_l01 = {
.timings = &innolux_g070y2_l01_timing,
.num_timings = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 152,
.height = 91,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 61e565524542..bbc4569cbcdc 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -387,13 +387,11 @@ static int st7789v_probe(struct spi_device *spi)
return 0;
}
-static int st7789v_remove(struct spi_device *spi)
+static void st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id st7789v_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index ba0b3ead150f..0d7541a33f87 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -655,7 +655,7 @@ static int acx565akm_probe(struct spi_device *spi)
return 0;
}
-static int acx565akm_remove(struct spi_device *spi)
+static void acx565akm_remove(struct spi_device *spi)
{
struct acx565akm_panel *lcd = spi_get_drvdata(spi);
@@ -666,8 +666,6 @@ static int acx565akm_remove(struct spi_device *spi)
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id acx565akm_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index ba0c00d1a001..4dbf8b88f264 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -350,15 +350,13 @@ static int td028ttec1_probe(struct spi_device *spi)
return 0;
}
-static int td028ttec1_remove(struct spi_device *spi)
+static void td028ttec1_remove(struct spi_device *spi)
{
struct td028ttec1_panel *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id td028ttec1_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 1866cdb8f9c1..cf4609bb9b1d 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -463,7 +463,7 @@ static int td043mtea1_probe(struct spi_device *spi)
return 0;
}
-static int td043mtea1_remove(struct spi_device *spi)
+static void td043mtea1_remove(struct spi_device *spi)
{
struct td043mtea1_panel *lcd = spi_get_drvdata(spi);
@@ -472,8 +472,6 @@ static int td043mtea1_remove(struct spi_device *spi)
drm_panel_unprepare(&lcd->panel);
sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
-
- return 0;
}
static const struct of_device_id td043mtea1_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index e3791dad6830..0b1f5a11a055 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -450,12 +450,11 @@ static int tpg110_probe(struct spi_device *spi)
return 0;
}
-static int tpg110_remove(struct spi_device *spi)
+static void tpg110_remove(struct spi_device *spi)
{
struct tpg110 *tpg = spi_get_drvdata(spi);
drm_panel_remove(&tpg->panel);
- return 0;
}
static const struct of_device_id tpg110_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 8bc976f54b80..236f3cb2b594 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -407,12 +407,11 @@ static int ws2401_probe(struct spi_device *spi)
return 0;
}
-static int ws2401_remove(struct spi_device *spi)
+static void ws2401_remove(struct spi_device *spi)
{
struct ws2401 *ws = spi_get_drvdata(spi);
drm_panel_remove(&ws->panel);
- return 0;
}
/*
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0fce73b9a646..70bd84b7ef2b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->pdev->device == 0x6741))
+ (rdev->pdev->device == 0x6741) &&
+ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
return;
if (!radeon_encoder->enc_priv)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 377f9cdb5b53..84013faa4756 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
void *ptr;
-
- int i, r;
+ long r;
+ int i;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
@@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
return r ? r : -ETIME;
}
r = radeon_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
return r;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 830bdd5e9b7c..8677c8271678 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
- ret);
- return ret;
- }
-
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 1f7353f0684a..798b542e5916 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/*
@@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = {
static const struct vop_win_data rk3399_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3399_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &rk3288_win01_data,
+ { .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &rk3288_win23_data,
+ { .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &rk3288_win23_data,
+ { .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 145833a9d82d..5b3fbee18671 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -111,10 +111,10 @@
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
/* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV 17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV 19
+#define SUN8I_MIXER_FBFMT_P010_YUV 16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 18
+/* format 19 is P210 YVU */
/* format 20 is packed YVU444 10-bit */
/* format 21 is packed YUV444 10-bit */
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8cf5aeb9db6c..201f5175ecfe 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -5,6 +5,7 @@ config DRM_TEGRA
depends on COMMON_CLK
depends on DRM
depends on OF
+ select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 1f96e416fa08..d7a731d287d2 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_bus.h>
#include <drm/drm_panel.h>
#include "dp.h"
@@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
list_add_tail(&dpaux->list, &dpaux_list);
mutex_unlock(&dpaux_lock);
+ err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 223ab2ceb7e6..3762d87759d9 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon,
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+ virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9b33c05732aa..ebb025543f8d 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -263,14 +263,12 @@ static int hx8357d_probe(struct spi_device *spi)
return 0;
}
-static int hx8357d_remove(struct spi_device *spi)
+static void hx8357d_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void hx8357d_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index bcc181351236..fc8ed245b0bc 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -193,14 +193,12 @@ static int ili9163_probe(struct spi_device *spi)
return 0;
}
-static int ili9163_remove(struct spi_device *spi)
+static void ili9163_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9163_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 976d3209f164..cc92eb9f2a07 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -411,14 +411,12 @@ static int ili9225_probe(struct spi_device *spi)
return 0;
}
-static int ili9225_remove(struct spi_device *spi)
+static void ili9225_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9225_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 37e0c33399c8..5b8cc770ee7b 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -225,14 +225,12 @@ static int ili9341_probe(struct spi_device *spi)
return 0;
}
-static int ili9341_remove(struct spi_device *spi)
+static void ili9341_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9341_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index e9a63f4b2993..6d655e18e0aa 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -243,14 +243,12 @@ static int ili9486_probe(struct spi_device *spi)
return 0;
}
-static int ili9486_remove(struct spi_device *spi)
+static void ili9486_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9486_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 023de49e7a8e..5e060f6910bb 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -233,14 +233,12 @@ static int mi0283qt_probe(struct spi_device *spi)
return 0;
}
-static int mi0283qt_remove(struct spi_device *spi)
+static void mi0283qt_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void mi0283qt_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 97a775c48cea..beeeb170d0b1 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -1140,14 +1140,12 @@ static int repaper_probe(struct spi_device *spi)
return 0;
}
-static int repaper_remove(struct spi_device *spi)
+static void repaper_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void repaper_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 51b9b9fb3ead..3f38faa1cd8c 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -360,14 +360,12 @@ static int st7586_probe(struct spi_device *spi)
return 0;
}
-static int st7586_remove(struct spi_device *spi)
+static void st7586_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void st7586_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index fc40dd10efa8..29d618093e94 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -247,14 +247,12 @@ static int st7735r_probe(struct spi_device *spi)
return 0;
}
-static int st7735r_remove(struct spi_device *spi)
+static void st7735r_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void st7735r_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 287dbc89ad64..783890e8d43a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (ret)
return ret;
- ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
- if (ret)
- return ret;
+ /*
+ * post_crtc_powerdown will have called pm_runtime_put, so we
+ * don't need it here otherwise we'll get the reference counting
+ * wrong.
+ */
return 0;
}
@@ -671,7 +673,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
- mode = &crtc_state->adjusted_mode;
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
mode->clock * 9 / 10) * 1000;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a229da58962a..9300d3354c51 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1262,7 +1262,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
- int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1297,18 +1296,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
- if (ret) {
- mipi_dsi_host_unregister(&dsi->dsi_host);
- return ret;
- }
-
- return 0;
+ return component_add(&dsi->pdev->dev, &vc4_dsi_ops);
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+
+ component_del(&dsi->pdev->dev, &vc4_dsi_ops);
return 0;
}
@@ -1686,9 +1682,7 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- component_del(&pdev->dev, &vc4_dsi_ops);
mipi_dsi_host_unregister(&dsi->dsi_host);
-
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 053fbaf765ca..3a1626f261e5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
connected = true;
} else {
- unsigned long flags;
- u32 hotplug;
-
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- hotplug = HDMI_READ(HDMI_HOTPLUG);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
- if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+ if (vc4_hdmi->variant->hp_detect &&
+ vc4_hdmi->variant->hp_detect(vc4_hdmi))
connected = true;
}
@@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
unsigned long long tmds_rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return -EINVAL;
@@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
@@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
return channel_map;
}
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+ unsigned long flags;
+ u32 hotplug;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ hotplug = HDMI_READ(HDMI_HOTPLUG);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
/* HDMI audio codec callbacks */
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
@@ -1741,6 +1749,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
return PTR_ERR(codec_pdev);
}
+ vc4_hdmi->audio.codec_pdev = codec_pdev;
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
@@ -1780,6 +1789,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
+static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
+{
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
+
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2504,7 +2519,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
* vc4_hdmi_disable_scrambling() will thus run at boot, make
* sure it's disabled, and avoid any inconsistency.
*/
- vc4_hdmi->scdc_enabled = true;
+ if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+ vc4_hdmi->scdc_enabled = true;
ret = variant->init_resources(vc4_hdmi);
if (ret)
@@ -2651,6 +2667,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
kfree(vc4_hdmi->hdmi_regset.regs);
kfree(vc4_hdmi->hd_regset.regs);
+ vc4_hdmi_audio_exit(vc4_hdmi);
vc4_hdmi_cec_exit(vc4_hdmi);
vc4_hdmi_hotplug_exit(vc4_hdmi);
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
@@ -2723,6 +2740,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2751,6 +2769,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 36c0b082a43b..6ffdd4ec5fb6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -102,6 +102,9 @@ struct vc4_hdmi_variant {
/* Enables HDR metadata */
bool supports_hdr;
+
+ /* Callback for hardware specific hotplug detect */
+ bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
};
/* HDMI audio information */
@@ -113,6 +116,7 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
struct hdmi_audio_infoframe infoframe;
+ struct platform_device *codec_pdev;
bool streaming;
};
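vc4_hdmi_connector_detect() now routes hotplug sensing through the new optional per-variant hp_detect() hook instead of reading the HDMI_HOTPLUG register unconditionally, since only the BCM2711 (VC5) variants provide that register. A hedged, standalone sketch of the optional-callback-with-fallback pattern, using hypothetical types rather than the vc4 structures:

    #include <stdbool.h>

    struct variant {
            /* NULL on generations without a hotplug status register. */
            bool (*hp_detect)(void *ctx);
    };

    static bool connector_connected(const struct variant *v, void *ctx)
    {
            if (v->hp_detect)
                    return v->hp_detect(ctx);  /* register-based detect (VC5) */
            return false;                      /* no register: other paths decide */
    }

Only the two bcm2711 variant tables above set .hp_detect, so older hardware falls back to the GPIO/EDID-based detection that precedes this branch.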
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d6b66636a19b..ea3ecdda561d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1140,15 +1140,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file);
+ int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 44ca23b0ea4e..dd2ff441068e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3879,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* Also if copying fails, user-space will be unable to signal the fence object
* so we wait for it immediately, and then unreference the user-space reference.
*/
-void
+int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence, uint32_t fence_handle,
- int32_t out_fence_fd, struct sync_file *sync_file)
+ int32_t out_fence_fd)
{
struct drm_vmw_fence_rep fence_rep;
if (user_fence_rep == NULL)
- return;
+ return 0;
memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3917,19 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- if (sync_file)
- fput(sync_file->file);
-
- if (fence_rep.fd != -1) {
- put_unused_fd(fence_rep.fd);
- fence_rep.fd = -1;
- }
-
ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
+
+ return ret ? -EFAULT : 0;
}
/**
@@ -4266,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
+ }
+ }
+
+ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle, out_fence_fd);
+
+ if (sync_file) {
+ if (ret) {
+ /* usercopy of fence failed, put the file object */
+ fput(sync_file->file);
+ put_unused_fd(out_fence_fd);
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
}
}
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle, out_fence_fd,
- sync_file);
-
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
@@ -4293,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
*/
vmw_validation_unref_lists(&val_ctx);
- return 0;
+ return ret;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
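The vmwgfx rework moves the sync_file fd handling to the caller so the fence representation is copied to user space first, and only then is the fd either installed or given back: once fd_install() has run, the descriptor belongs to user space and cannot be revoked. A hedged sketch of that publish-last ordering, with stand-in helpers rather than the kernel fd API:

    #include <stdbool.h>

    struct pending_fd {
            bool installed;     /* visible to user space, cannot be revoked */
            bool released;      /* file and fd number given back */
    };

    static int copy_fence_rep_to_user(void);

    /* Run every step that can fail before publishing the fd; once installed
     * it belongs to user space and cannot be taken back. */
    static int finish_sync_file(struct pending_fd *fd)
    {
            if (copy_fence_rep_to_user() != 0) {
                    fd->released = true;     /* fput() + put_unused_fd() in vmwgfx */
                    return -1;
            }
            fd->installed = true;            /* fd_install() */
            return 0;
    }

    static int copy_fence_rep_to_user(void)
    {
            return 0;                        /* stub: the sketch's fallible step */
    }

This is also why vmw_execbuf_copy_fence_user() now returns int and why vmw_execbuf_process() propagates that value instead of returning 0 unconditionally.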
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c60d395f9e2e..5001b87aebe8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1128,7 +1128,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 4e693e8de2c3..bbd2f4ec08ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2501,7 +2501,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index e08e331e46ae..f87a8705f518 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host)
struct host1x_syncpt *sp_base = host->syncpt;
unsigned int i;
- for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+ for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+ /*
+ * Unassign syncpt from channels for purposes of Tegra186
+ * syncpoint protection. This prevents any channel from
+ * accessing it until it is reassigned.
+ */
+ host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
host1x_hw_syncpt_restore(host, sp_base + i);
+ }
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
@@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
void *ref;
struct host1x_waitlist *waiter;
int err = 0, check_count = 0;
- u32 val;
if (value)
- *value = 0;
-
- /* first check cache */
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = host1x_syncpt_load(sp);
+ *value = host1x_syncpt_load(sp);
+ if (host1x_syncpt_is_expired(sp, thresh))
return 0;
- }
-
- /* try to read from register */
- val = host1x_hw_syncpt_load(sp->host, sp);
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = val;
-
- goto done;
- }
if (!timeout) {
err = -EAGAIN;
@@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
-
- /*
- * Unassign syncpt from channels for purposes of Tegra186
- * syncpoint protection. This prevents any channel from
- * accessing it until it is reassigned.
- */
- host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
}
for (i = 0; i < host->info->nb_bases; i++)
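For readability, the rewritten fast path of host1x_syncpt_wait() above assembles to roughly the following (no new logic, just the hunk with the deleted lines removed):

    if (value)
            *value = host1x_syncpt_load(sp);  /* always report the current value */
    if (host1x_syncpt_is_expired(sp, thresh))
            return 0;                         /* threshold already reached */
    /* otherwise fall through to -EAGAIN for a zero timeout, or to the waiter path */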
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2503be0253d3..19fa734a9a79 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 800 ms timeout */
+ /* Get response with status within a max of 1600 ms timeout */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
@@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+ if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+ writel(0, privdata->mmio + AMD_P2C_MSG(4));
+ writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->clear_intr)
+ privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+ amd_sfh_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+ int rc;
+
+ pci_intx(privdata->pdev, true);
+
+ rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+ amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+ if (rc) {
+ dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+ privdata->pdev->irq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
}
static const struct amd_mp2_ops amd_sfh_ops_v2 = {
@@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
.response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
@@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
}
}
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->init_intr)
+ return privdata->mp2_ops->init_intr(privdata);
+
+ return 0;
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
@@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
mp2_select_ops(privdata);
+ rc = amd_sfh_irq_init(privdata);
+ if (rc) {
+ dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
rc = amd_sfh_hid_client_init(privdata);
- if (rc)
+ if (rc) {
+ amd_sfh_clear_intr(privdata);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
+ }
+
+ amd_sfh_clear_intr(privdata);
return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
@@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
}
}
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
@@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
}
}
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
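The timeout change near the top of this file relies on readl_poll_timeout() from <linux/iopoll.h>, which re-reads an MMIO register until a condition holds or a deadline passes. A minimal sketch of its semantics with the new values; RESPONSE_REG and RESPONSE_READY are illustrative placeholders, not driver symbols:

    #include <linux/iopoll.h>

    u32 resp;
    int ret;

    /* Read every 500 us; give up after 1600000 us (1.6 s). */
    ret = readl_poll_timeout(mmio + RESPONSE_REG, resp,
                             (resp & RESPONSE_READY), 500, 1600000);
    if (ret)
            return ret;   /* -ETIMEDOUT if the condition never became true */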
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index ae30e059f847..97b99861fae2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -49,7 +49,7 @@ union sfh_cmd_base {
} s;
struct {
u32 cmd_id : 4;
- u32 intr_enable : 1;
+ u32 intr_disable : 1;
u32 rsvd1 : 3;
u32 length : 7;
u32 mem_type : 1;
@@ -141,5 +141,7 @@ struct amd_mp2_ops {
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
};
#endif

diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index be41f83b0289..76095bd53c65 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -27,6 +27,7 @@
#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
get_common_inputs(&als_input.common_property, report_id);
/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
if (supported_input == V2_STATUS)
- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ als_input.illuminance_value =
+ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
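The ALS fix above masks the raw register value with GENMASK(14, 0) so only the illuminance bits are reported. As a standalone sketch (ALS_REG is a placeholder offset):

    #include <linux/bits.h>

    #define ILLUMINANCE_MASK  GENMASK(14, 0)   /* bits 14:0 hold the lux value */

    u32 raw = readl(mmio + ALS_REG);
    u32 lux = raw & ILLUMINANCE_MASK;          /* strip status/flag bits above bit 14 */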
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 24802a4a636e..7dc89dc6b0f0 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 26c31d759914..81e7e404a5fc 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_F22] = "F22", [KEY_F23] = "F23",
[KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
[KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
- [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
+ [KEY_PROG4] = "Prog4",
+ [KEY_ALL_APPLICATIONS] = "AllApplications",
+ [KEY_SUSPEND] = "Suspend",
[KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
[KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
[KEY_PRINT] = "Print", [KEY_HP] = "HP",
@@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_ASSISTANT] = "Assistant",
[KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
[KEY_EMOJI_PICKER] = "EmojiPicker",
+ [KEY_DICTATE] = "Dictate",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 8e960d7b233b..2876cb6a7dca 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct elo_priv *priv;
int ret;
- struct usb_device *udev;
if (!hid_is_usb(hdev))
return -EINVAL;
@@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENOMEM;
INIT_DELAYED_WORK(&priv->work, elo_work);
- udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
- priv->usbdev = usb_get_dev(udev);
+ priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
hid_set_drvdata(hdev, priv);
@@ -270,8 +268,6 @@ static void elo_remove(struct hid_device *hdev)
{
struct elo_priv *priv = hid_get_drvdata(hdev);
- usb_put_dev(priv->usbdev);
-
hid_hw_stop(hdev);
cancel_delayed_work_sync(&priv->work);
kfree(priv);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85975031389b..78bd3ddda442 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1370,6 +1370,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 112901d2d8d2..56ec27398a00 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+ case 0x0d8: map_key_clear(KEY_DICTATE); break;
case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
@@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+ case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7106b921b53c..c358778e070b 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0f:
+ case 0x11:
device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index b6a9a0f3966e..2204de889739 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -2128,6 +2128,10 @@ static int nintendo_hid_probe(struct hid_device *hdev,
spin_lock_init(&ctlr->lock);
ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq",
WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ if (!ctlr->rumble_queue) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker);
ret = hid_parse(hdev);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 9af1dc8ae3a2..c066ba901867 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 03b935ff02d5..c3e6d69fdfbd 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -64,7 +64,9 @@ struct tm_wheel_info {
*/
static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0306, 0x0006, "Thrustmaster T150RS"},
+ {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
+ {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
@@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
return;
}
+ if (usbif->cur_altsetting->desc.bNumEndpoints < 2) {
+ kfree(send_buf);
+ hid_err(hdev, "Wrong number of endpoints?\n");
+ return;
+ }
+
ep = &usbif->cur_altsetting->endpoint[1];
b_ep = ep->desc.bEndpointAddress;
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index efa6140915f4..42ceb2058a09 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -144,7 +144,7 @@ out:
static int vivaldi_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
- return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);
+ return devm_device_add_group(&hdev->dev, &input_attribute_group);
}
static const struct hid_device_id vivaldi_table[] = {
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index b4dad66fa954..ec6c73f75ffe 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
struct regulator *vdd;
struct notifier_block nb;
- struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
container_of(nb, struct i2c_hid_of_goodix, nb);
int ret = NOTIFY_OK;
- mutex_lock(&ihid_goodix->regulator_mutex);
-
switch (event) {
case REGULATOR_EVENT_PRE_DISABLE:
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
break;
}
- mutex_unlock(&ihid_goodix->regulator_mutex);
-
return ret;
}
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
if (!ihid_goodix)
return -ENOMEM;
- mutex_init(&ihid_goodix->regulator_mutex);
-
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
* long. Holding the controller in reset apparently draws extra
* power.
*/
- mutex_lock(&ihid_goodix->regulator_mutex);
ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret) {
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ if (ret)
return dev_err_probe(&client->dev, ret,
"regulator notifier request failed\n");
- }
/*
* If someone else is holding the regulator on (or the regulator is
* an always-on one) we might never be told to deassert reset. Do it
- * now. Here we'll assume that someone else might have _just
- * barely_ turned the regulator on so we'll do the full
- * "post_power_delay" just in case.
+ * now... and temporarily bump the regulator reference count just to
+ * make sure it is impossible for this to race with our own notifier!
+ * We also assume that someone else might have _just barely_ turned
+ * the regulator on so we'll do the full "post_power_delay" just in
+ * case.
*/
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ regulator_disable(ihid_goodix->vdd);
+ }
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
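The goodix probe change above replaces the mutex with a short-lived regulator reference, so deasserting reset cannot race with the driver's own PRE_DISABLE notifier. The core of the pattern as a sketch (post_power_delay_ms stands in for the driver's timing data, and the deassert helper is inlined here):

    if (reset_gpio && regulator_is_enabled(vdd)) {
            ret = regulator_enable(vdd);             /* pin the supply on for a moment */
            if (ret)
                    return ret;
            gpiod_set_value_cansleep(reset_gpio, 0); /* deassert reset */
            msleep(post_power_delay_ms);
            regulator_disable(vdd);                  /* drop the temporary reference */
    }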
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index ca873a3b98db..f2d05bff4245 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1660,6 +1660,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
unsigned long t;
int ret;
+ /*
+ * max_pkt_size should be large enough for one vmbus packet header plus
+ * our receive buffer size. Hyper-V sends messages up to
+ * HV_HYP_PAGE_SIZE bytes long on balloon channel.
+ */
+ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+
ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
balloon_onchannelcallback, dev);
if (ret)
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index eb2833d2b5d0..832885198643 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -13,7 +13,7 @@
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
static void hvt_reset(struct hvutil_transport *hvt)
{
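The hv_utils_transport change is purely cosmetic: LIST_HEAD() both declares and initializes the list head. The two forms are equivalent:

    static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);  /* old spelling */
    static LIST_HEAD(hvt_list);                                   /* expands to the line above */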
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 17bf55fe3169..12a2b37e87f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
ret = sysfs_create_group(kobj, &vmbus_chan_group);
@@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* The calling functions' error handling paths will cleanup the
* empty channel directory.
*/
+ kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
@@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type,
return child_device_obj;
}
-static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
@@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
- child_device_obj->device.dma_mask = &vmbus_dma_mask;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
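The vmbus_add_channel_kobj() fix above follows the documented kobject rule that kobject_init_and_add() consumes a reference even when it fails, so error paths must still call kobject_put(). A minimal sketch of the pattern (my_ktype, parent and id are placeholders):

    ret = kobject_init_and_add(kobj, &my_ktype, parent, "%u", id);
    if (ret) {
            kobject_put(kobj);   /* releases the reference taken by init_and_add */
            return ret;
    }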
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8df25f1079ba..9ab4e9b3d27b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -174,6 +174,7 @@ config SENSORS_ADM9240
config SENSORS_ADT7X10
tristate
+ select REGMAP
help
This module contains common code shared by the ADT7310/ADT7320 and
ADT7410/ADT7420 temperature monitoring chip drivers.
@@ -505,6 +506,21 @@ config SENSORS_DELL_SMM
When option I8K is also enabled this driver provides legacy /proc/i8k
userspace interface for i8kutils package.
+config I8K
+ bool "Legacy /proc/i8k interface of Dell laptop SMM BIOS hwmon driver"
+ depends on SENSORS_DELL_SMM
+ depends on PROC_FS
+ help
+ This option enables the legacy /proc/i8k userspace interface of the
+ dell-smm-hwmon driver. The character file /proc/i8k exposes the BIOS
+ version, temperatures and allows control of fan speeds of some Dell
+ laptops. Sometimes it also reports power and hotkey status.
+
+ This interface is required to run programs from the i8kutils package.
+
+ Say Y if you intend to run userspace programs that use this interface.
+ Say N otherwise.
+
config SENSORS_DA9052_ADC
tristate "Dialog DA9052/DA9053 ADC"
depends on PMIC_DA9052
@@ -1208,8 +1224,8 @@ config SENSORS_LM70
depends on SPI_MASTER
help
If you say yes here you get support for the National Semiconductor
- LM70, LM71, LM74 and Texas Instruments TMP121/TMP123 digital tempera-
- ture sensor chips.
+ LM70, LM71, LM74 and Texas Instruments TMP121/TMP123, TMP122/TMP124,
+ TMP125 digital temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called lm70.
@@ -1288,6 +1304,7 @@ config SENSORS_LM80
config SENSORS_LM83
tristate "National Semiconductor LM83 and compatibles"
depends on I2C
+ select REGMAP
help
If you say yes here you get support for National Semiconductor
LM82 and LM83 sensor chips.
@@ -1979,6 +1996,17 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP464
+ tristate "Texas Instruments TMP464 and compatible"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP464
+ and TMP468 temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp464.
+
config SENSORS_TMP513
tristate "Texas Instruments TMP513 and compatibles"
depends on I2C
@@ -2252,16 +2280,31 @@ config SENSORS_ASUS_WMI
config SENSORS_ASUS_WMI_EC
tristate "ASUS WMI B550/X570"
- depends on ACPI_WMI
+ depends on ACPI_WMI && SENSORS_ASUS_EC=n
help
If you say yes here you get support for the ACPI embedded controller
hardware monitoring interface found in B550/X570 ASUS motherboards.
This driver will provide readings of fans, voltages and temperatures
through the system firmware.
+ This driver is deprecated in favor of the ASUS EC Sensors driver
+ which provides fully compatible output.
+
This driver can also be built as a module. If so, the module
will be called asus_wmi_sensors_ec.
+config SENSORS_ASUS_EC
+ tristate "ASUS EC Sensors"
+ depends on X86
+ help
+ If you say yes here you get support for the ACPI embedded controller
+ hardware monitoring interface found in ASUS motherboards. The driver
+ currently supports B550/X570 boards, although other ASUS boards might
+ provide this monitoring interface as well.
+
+ This driver can also be built as a module. If so, the module
+ will be called asus_ec_sensors.
+
endif # ACPI
endif # HWMON
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 185f946d698b..4ed138d0621f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
# APCI drivers
obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
+obj-$(CONFIG_SENSORS_ASUS_EC) += asus-ec-sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI) += asus_wmi_sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI_EC) += asus_wmi_ec_sensors.o
@@ -194,6 +195,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP464) += tmp464.o
obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index e5bc5ce09f4e..de37bce24fa6 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -194,7 +194,7 @@ out_err:
return status;
}
-static int adcxx_remove(struct spi_device *spi)
+static void adcxx_remove(struct spi_device *spi)
{
struct adcxx *adc = spi_get_drvdata(spi);
int i;
@@ -205,8 +205,6 @@ static int adcxx_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
mutex_unlock(&adc->lock);
-
- return 0;
}
static const struct spi_device_id adcxx_ids[] = {
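The adcxx change above reflects the conversion of spi_driver's .remove callback to return void; errors can no longer be returned from remove, only logged. A sketch of the new shape (my_teardown and my_data are placeholders):

    static void my_spi_remove(struct spi_device *spi)
    {
            struct my_data *data = spi_get_drvdata(spi);

            my_teardown(data);   /* clean up; there is nothing useful to return */
    }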
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index c40cac16af68..1efc0bdcceab 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -38,16 +39,13 @@ static const u8 adt7310_reg_table[] = {
#define AD7310_COMMAND(reg) (adt7310_reg_table[(reg)] << ADT7310_CMD_REG_OFFSET)
-static int adt7310_spi_read_word(struct device *dev, u8 reg)
+static int adt7310_spi_read_word(struct spi_device *spi, u8 reg)
{
- struct spi_device *spi = to_spi_device(dev);
-
return spi_w8r16be(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
-static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
+static int adt7310_spi_write_word(struct spi_device *spi, u8 reg, u16 data)
{
- struct spi_device *spi = to_spi_device(dev);
u8 buf[3];
buf[0] = AD7310_COMMAND(reg);
@@ -56,17 +54,13 @@ static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
return spi_write(spi, buf, sizeof(buf));
}
-static int adt7310_spi_read_byte(struct device *dev, u8 reg)
+static int adt7310_spi_read_byte(struct spi_device *spi, u8 reg)
{
- struct spi_device *spi = to_spi_device(dev);
-
return spi_w8r8(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
-static int adt7310_spi_write_byte(struct device *dev, u8 reg,
- u8 data)
+static int adt7310_spi_write_byte(struct spi_device *spi, u8 reg, u8 data)
{
- struct spi_device *spi = to_spi_device(dev);
u8 buf[2];
buf[0] = AD7310_COMMAND(reg);
@@ -75,25 +69,79 @@ static int adt7310_spi_write_byte(struct device *dev, u8 reg,
return spi_write(spi, buf, sizeof(buf));
}
-static const struct adt7x10_ops adt7310_spi_ops = {
- .read_word = adt7310_spi_read_word,
- .write_word = adt7310_spi_write_word,
- .read_byte = adt7310_spi_read_byte,
- .write_byte = adt7310_spi_write_byte,
-};
-
-static int adt7310_spi_probe(struct spi_device *spi)
+static bool adt7310_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- return adt7x10_probe(&spi->dev, spi_get_device_id(spi)->name, spi->irq,
- &adt7310_spi_ops);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_STATUS:
+ return true;
+ default:
+ return false;
+ }
}
-static int adt7310_spi_remove(struct spi_device *spi)
+static int adt7310_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- adt7x10_remove(&spi->dev, spi->irq);
+ struct spi_device *spi = context;
+ int regval;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ regval = adt7310_spi_read_word(spi, reg);
+ break;
+ default:
+ regval = adt7310_spi_read_byte(spi, reg);
+ break;
+ }
+ if (regval < 0)
+ return regval;
+ *val = regval;
return 0;
}
+static int adt7310_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct spi_device *spi = context;
+ int ret;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ ret = adt7310_spi_write_word(spi, reg, val);
+ break;
+ default:
+ ret = adt7310_spi_write_byte(spi, reg, val);
+ break;
+ }
+ return ret;
+}
+
+static const struct regmap_config adt7310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adt7310_regmap_is_volatile,
+ .reg_read = adt7310_reg_read,
+ .reg_write = adt7310_reg_write,
+};
+
+static int adt7310_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(&spi->dev, NULL, spi, &adt7310_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adt7x10_probe(&spi->dev, spi_get_device_id(spi)->name, spi->irq,
+ regmap);
+}
+
static const struct spi_device_id adt7310_id[] = {
{ "adt7310", 0 },
{ "adt7320", 0 },
@@ -107,7 +155,6 @@ static struct spi_driver adt7310_driver = {
.pm = ADT7X10_DEV_PM_OPS,
},
.probe = adt7310_spi_probe,
- .remove = adt7310_spi_remove,
.id_table = adt7310_id,
};
module_spi_driver(adt7310_driver);
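The adt7310 conversion above uses regmap in its callback ("no-bus") form: passing a NULL bus to devm_regmap_init() together with .reg_read/.reg_write in the config makes regmap call those helpers directly. Stripped to the essentials (my_reg_read and my_reg_write are placeholders):

    static const struct regmap_config my_regmap_config = {
            .reg_bits  = 8,
            .val_bits  = 16,
            .reg_read  = my_reg_read,   /* int (*)(void *ctx, unsigned int reg, unsigned int *val) */
            .reg_write = my_reg_write,  /* int (*)(void *ctx, unsigned int reg, unsigned int val)  */
    };

    regmap = devm_regmap_init(dev, NULL, ctx, &my_regmap_config);
    if (IS_ERR(regmap))
            return PTR_ERR(regmap);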
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 973db057427b..aede5baca7b9 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -9,49 +9,82 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include "adt7x10.h"
-static int adt7410_i2c_read_word(struct device *dev, u8 reg)
+static bool adt7410_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- return i2c_smbus_read_word_swapped(to_i2c_client(dev), reg);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_STATUS:
+ return true;
+ default:
+ return false;
+ }
}
-static int adt7410_i2c_write_word(struct device *dev, u8 reg, u16 data)
+static int adt7410_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- return i2c_smbus_write_word_swapped(to_i2c_client(dev), reg, data);
-}
+ struct i2c_client *client = context;
+ int regval;
-static int adt7410_i2c_read_byte(struct device *dev, u8 reg)
-{
- return i2c_smbus_read_byte_data(to_i2c_client(dev), reg);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ regval = i2c_smbus_read_word_swapped(client, reg);
+ break;
+ default:
+ regval = i2c_smbus_read_byte_data(client, reg);
+ break;
+ }
+ if (regval < 0)
+ return regval;
+ *val = regval;
+ return 0;
}
-static int adt7410_i2c_write_byte(struct device *dev, u8 reg, u8 data)
+static int adt7410_reg_write(void *context, unsigned int reg, unsigned int val)
{
- return i2c_smbus_write_byte_data(to_i2c_client(dev), reg, data);
+ struct i2c_client *client = context;
+ int ret;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ ret = i2c_smbus_write_word_swapped(client, reg, val);
+ break;
+ default:
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ break;
+ }
+ return ret;
}
-static const struct adt7x10_ops adt7410_i2c_ops = {
- .read_word = adt7410_i2c_read_word,
- .write_word = adt7410_i2c_write_word,
- .read_byte = adt7410_i2c_read_byte,
- .write_byte = adt7410_i2c_write_byte,
+static const struct regmap_config adt7410_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADT7X10_ID,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adt7410_regmap_is_volatile,
+ .reg_read = adt7410_reg_read,
+ .reg_write = adt7410_reg_write,
};
static int adt7410_i2c_probe(struct i2c_client *client)
{
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ struct regmap *regmap;
- return adt7x10_probe(&client->dev, NULL, client->irq, &adt7410_i2c_ops);
-}
+ regmap = devm_regmap_init(&client->dev, NULL, client,
+ &adt7410_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
-static int adt7410_i2c_remove(struct i2c_client *client)
-{
- adt7x10_remove(&client->dev, client->irq);
- return 0;
+ return adt7x10_probe(&client->dev, client->name, client->irq, regmap);
}
static const struct i2c_device_id adt7410_ids[] = {
@@ -68,7 +101,6 @@ static struct i2c_driver adt7410_driver = {
.pm = ADT7X10_DEV_PM_OPS,
},
.probe_new = adt7410_i2c_probe,
- .remove = adt7410_i2c_remove,
.id_table = adt7410_ids,
.address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index d519aca4a9d6..fb6d14d213a1 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -662,6 +662,9 @@ static int adt7470_fan_write(struct device *dev, u32 attr, int channel, long val
struct adt7470_data *data = dev_get_drvdata(dev);
int err;
+ if (val <= 0)
+ return -EINVAL;
+
val = FAN_RPM_TO_PERIOD(val);
val = clamp_val(val, 1, 65534);
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index e9d33aa78a19..ce54bffab2ec 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -8,16 +8,17 @@
* and adt7410.c from iio-staging by Sonic Zhang <sonic.zhang@analog.com>
*/
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include "adt7x10.h"
@@ -53,80 +54,57 @@
/* Each client has this additional data */
struct adt7x10_data {
- const struct adt7x10_ops *ops;
- const char *name;
- struct device *hwmon_dev;
+ struct regmap *regmap;
struct mutex update_lock;
u8 config;
u8 oldconfig;
- bool valid; /* true if registers valid */
- unsigned long last_updated; /* In jiffies */
- s16 temp[4]; /* Register values,
- 0 = input
- 1 = high
- 2 = low
- 3 = critical */
- u8 hyst; /* hysteresis offset */
+ bool valid; /* true if temperature valid */
};
-static int adt7x10_read_byte(struct device *dev, u8 reg)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->read_byte(dev, reg);
-}
-
-static int adt7x10_write_byte(struct device *dev, u8 reg, u8 data)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->write_byte(dev, reg, data);
-}
-
-static int adt7x10_read_word(struct device *dev, u8 reg)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->read_word(dev, reg);
-}
-
-static int adt7x10_write_word(struct device *dev, u8 reg, u16 data)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->write_word(dev, reg, data);
-}
+enum {
+ adt7x10_temperature = 0,
+ adt7x10_t_alarm_high,
+ adt7x10_t_alarm_low,
+ adt7x10_t_crit,
+};
-static const u8 ADT7X10_REG_TEMP[4] = {
- ADT7X10_TEMPERATURE, /* input */
- ADT7X10_T_ALARM_HIGH, /* high */
- ADT7X10_T_ALARM_LOW, /* low */
- ADT7X10_T_CRIT, /* critical */
+static const u8 ADT7X10_REG_TEMP[] = {
+ [adt7x10_temperature] = ADT7X10_TEMPERATURE, /* input */
+ [adt7x10_t_alarm_high] = ADT7X10_T_ALARM_HIGH, /* high */
+ [adt7x10_t_alarm_low] = ADT7X10_T_ALARM_LOW, /* low */
+ [adt7x10_t_crit] = ADT7X10_T_CRIT, /* critical */
};
static irqreturn_t adt7x10_irq_handler(int irq, void *private)
{
struct device *dev = private;
- int status;
+ struct adt7x10_data *d = dev_get_drvdata(dev);
+ unsigned int status;
+ int ret;
- status = adt7x10_read_byte(dev, ADT7X10_STATUS);
- if (status < 0)
+ ret = regmap_read(d->regmap, ADT7X10_STATUS, &status);
+ if (ret < 0)
return IRQ_HANDLED;
if (status & ADT7X10_STAT_T_HIGH)
- sysfs_notify(&dev->kobj, NULL, "temp1_max_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_max_alarm, 0);
if (status & ADT7X10_STAT_T_LOW)
- sysfs_notify(&dev->kobj, NULL, "temp1_min_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_min_alarm, 0);
if (status & ADT7X10_STAT_T_CRIT)
- sysfs_notify(&dev->kobj, NULL, "temp1_crit_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_crit_alarm, 0);
return IRQ_HANDLED;
}
-static int adt7x10_temp_ready(struct device *dev)
+static int adt7x10_temp_ready(struct regmap *regmap)
{
- int i, status;
+ unsigned int status;
+ int i, ret;
for (i = 0; i < 6; i++) {
- status = adt7x10_read_byte(dev, ADT7X10_STATUS);
- if (status < 0)
- return status;
+ ret = regmap_read(regmap, ADT7X10_STATUS, &status);
+ if (ret < 0)
+ return ret;
if (!(status & ADT7X10_STAT_NOT_RDY))
return 0;
msleep(60);
@@ -134,71 +112,10 @@ static int adt7x10_temp_ready(struct device *dev)
return -ETIMEDOUT;
}
-static int adt7x10_update_temp(struct device *dev)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int ret = 0;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- int temp;
-
- dev_dbg(dev, "Starting update\n");
-
- ret = adt7x10_temp_ready(dev); /* check for new value */
- if (ret)
- goto abort;
-
- temp = adt7x10_read_word(dev, ADT7X10_REG_TEMP[0]);
- if (temp < 0) {
- ret = temp;
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_REG_TEMP[0], ret);
- goto abort;
- }
- data->temp[0] = temp;
- data->last_updated = jiffies;
- data->valid = true;
- }
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-static int adt7x10_fill_cache(struct device *dev)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int ret;
- int i;
-
- for (i = 1; i < ARRAY_SIZE(data->temp); i++) {
- ret = adt7x10_read_word(dev, ADT7X10_REG_TEMP[i]);
- if (ret < 0) {
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_REG_TEMP[i], ret);
- return ret;
- }
- data->temp[i] = ret;
- }
-
- ret = adt7x10_read_byte(dev, ADT7X10_T_HYST);
- if (ret < 0) {
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_T_HYST, ret);
- return ret;
- }
- data->hyst = ret;
-
- return 0;
-}
-
static s16 ADT7X10_TEMP_TO_REG(long temp)
{
return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7X10_TEMP_MIN,
- ADT7X10_TEMP_MAX) * 128, 1000);
+ ADT7X10_TEMP_MAX) * 128, 1000);
}
static int ADT7X10_REG_TO_TEMP(struct adt7x10_data *data, s16 reg)
@@ -215,170 +132,233 @@ static int ADT7X10_REG_TO_TEMP(struct adt7x10_data *data, s16 reg)
/*-----------------------------------------------------------------------*/
-/* sysfs attributes for hwmon */
-
-static ssize_t adt7x10_temp_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_temp_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
-
-
- if (attr->index == 0) {
- int ret;
+ unsigned int regval;
+ int ret;
- ret = adt7x10_update_temp(dev);
- if (ret)
+ mutex_lock(&data->update_lock);
+ if (index == adt7x10_temperature && !data->valid) {
+ /* wait for valid temperature */
+ ret = adt7x10_temp_ready(data->regmap);
+ if (ret) {
+ mutex_unlock(&data->update_lock);
return ret;
+ }
+ data->valid = true;
}
+ mutex_unlock(&data->update_lock);
- return sprintf(buf, "%d\n", ADT7X10_REG_TO_TEMP(data,
- data->temp[attr->index]));
+ ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &regval);
+ if (ret)
+ return ret;
+
+ *val = ADT7X10_REG_TO_TEMP(data, regval);
+ return 0;
}
-static ssize_t adt7x10_temp_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static int adt7x10_temp_write(struct adt7x10_data *data, int index, long temp)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int nr = attr->index;
- long temp;
int ret;
- ret = kstrtol(buf, 10, &temp);
- if (ret)
- return ret;
-
mutex_lock(&data->update_lock);
- data->temp[nr] = ADT7X10_TEMP_TO_REG(temp);
- ret = adt7x10_write_word(dev, ADT7X10_REG_TEMP[nr], data->temp[nr]);
- if (ret)
- count = ret;
+ ret = regmap_write(data->regmap, ADT7X10_REG_TEMP[index],
+ ADT7X10_TEMP_TO_REG(temp));
mutex_unlock(&data->update_lock);
- return count;
+ return ret;
}
-static ssize_t adt7x10_t_hyst_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int nr = attr->index;
- int hyst;
+ int hyst, temp, ret;
+
+ mutex_lock(&data->update_lock);
+ ret = regmap_read(data->regmap, ADT7X10_T_HYST, &hyst);
+ if (ret) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &temp);
+ mutex_unlock(&data->update_lock);
+ if (ret)
+ return ret;
- hyst = (data->hyst & ADT7X10_T_HYST_MASK) * 1000;
+ hyst = (hyst & ADT7X10_T_HYST_MASK) * 1000;
/*
* hysteresis is stored as a 4 bit offset in the device, convert it
* to an absolute value
*/
- if (nr == 2) /* min has positive offset, others have negative */
+ /* min has positive offset, others have negative */
+ if (index == adt7x10_t_alarm_low)
hyst = -hyst;
- return sprintf(buf, "%d\n",
- ADT7X10_REG_TO_TEMP(data, data->temp[nr]) - hyst);
+
+ *val = ADT7X10_REG_TO_TEMP(data, temp) - hyst;
+ return 0;
}
-static ssize_t adt7x10_t_hyst_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static int adt7x10_hyst_write(struct adt7x10_data *data, long hyst)
{
- struct adt7x10_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
int limit, ret;
- long hyst;
- ret = kstrtol(buf, 10, &hyst);
- if (ret)
- return ret;
+ mutex_lock(&data->update_lock);
+
/* convert absolute hysteresis value to a 4 bit delta value */
- limit = ADT7X10_REG_TO_TEMP(data, data->temp[1]);
- hyst = clamp_val(hyst, ADT7X10_TEMP_MIN, ADT7X10_TEMP_MAX);
- data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000),
- 0, ADT7X10_T_HYST_MASK);
- ret = adt7x10_write_byte(dev, ADT7X10_T_HYST, data->hyst);
- if (ret)
- return ret;
+ ret = regmap_read(data->regmap, ADT7X10_T_ALARM_HIGH, &regval);
+ if (ret < 0)
+ goto abort;
+
+ limit = ADT7X10_REG_TO_TEMP(data, regval);
- return count;
+ hyst = clamp_val(hyst, ADT7X10_TEMP_MIN, ADT7X10_TEMP_MAX);
+ regval = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
+ ADT7X10_T_HYST_MASK);
+ ret = regmap_write(data->regmap, ADT7X10_T_HYST, regval);
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
}
-static ssize_t adt7x10_alarm_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_alarm_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ unsigned int status;
int ret;
- ret = adt7x10_read_byte(dev, ADT7X10_STATUS);
+ ret = regmap_read(data->regmap, ADT7X10_STATUS, &status);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", !!(ret & attr->index));
+ *val = !!(status & index);
+
+ return 0;
+}
+
+static umode_t adt7x10_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ case hwmon_temp_crit:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ default:
+ break;
+ }
+
+ return 0;
}
-static ssize_t name_show(struct device *dev, struct device_attribute *da,
- char *buf)
+static int adt7x10_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ switch (attr) {
+ case hwmon_temp_input:
+ return adt7x10_temp_read(data, adt7x10_temperature, val);
+ case hwmon_temp_max:
+ return adt7x10_temp_read(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min:
+ return adt7x10_temp_read(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit:
+ return adt7x10_temp_read(data, adt7x10_t_crit, val);
+ case hwmon_temp_max_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_crit, val);
+ case hwmon_temp_min_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_LOW, val);
+ case hwmon_temp_max_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_HIGH, val);
+ case hwmon_temp_crit_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_CRIT, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, adt7x10_temp, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, adt7x10_temp, 1);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, adt7x10_temp, 2);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, adt7x10_temp, 3);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, adt7x10_t_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_hyst, adt7x10_t_hyst, 2);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, adt7x10_t_hyst, 3);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_LOW);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_HIGH);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_CRIT);
-static DEVICE_ATTR_RO(name);
-
-static struct attribute *adt7x10_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- NULL
+static int adt7x10_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adt7x10_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return adt7x10_temp_write(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min:
+ return adt7x10_temp_write(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit:
+ return adt7x10_temp_write(data, adt7x10_t_crit, val);
+ case hwmon_temp_max_hyst:
+ return adt7x10_hyst_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *adt7x10_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_MAX_HYST | HWMON_T_MIN_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM),
+ NULL,
+};
+
+static const struct hwmon_ops adt7x10_hwmon_ops = {
+ .is_visible = adt7x10_is_visible,
+ .read = adt7x10_read,
+ .write = adt7x10_write,
};
-static const struct attribute_group adt7x10_group = {
- .attrs = adt7x10_attributes,
+static const struct hwmon_chip_info adt7x10_chip_info = {
+ .ops = &adt7x10_hwmon_ops,
+ .info = adt7x10_info,
};
+static void adt7x10_restore_config(void *private)
+{
+ struct adt7x10_data *data = private;
+
+ regmap_write(data->regmap, ADT7X10_CONFIG, data->oldconfig);
+}
+
int adt7x10_probe(struct device *dev, const char *name, int irq,
- const struct adt7x10_ops *ops)
+ struct regmap *regmap)
{
struct adt7x10_data *data;
+ unsigned int config;
+ struct device *hdev;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->ops = ops;
- data->name = name;
+ data->regmap = regmap;
dev_set_drvdata(dev, data);
mutex_init(&data->update_lock);
/* configure as specified */
- ret = adt7x10_read_byte(dev, ADT7X10_CONFIG);
+ ret = regmap_read(regmap, ADT7X10_CONFIG, &config);
if (ret < 0) {
dev_dbg(dev, "Can't read config? %d\n", ret);
return ret;
}
- data->oldconfig = ret;
+ data->oldconfig = config;
/*
* Set to 16 bit resolution, continuous conversion and comparator mode.
@@ -389,92 +369,49 @@ int adt7x10_probe(struct device *dev, const char *name, int irq,
data->config |= ADT7X10_FULL | ADT7X10_RESOLUTION | ADT7X10_EVENT_MODE;
if (data->config != data->oldconfig) {
- ret = adt7x10_write_byte(dev, ADT7X10_CONFIG, data->config);
+ ret = regmap_write(regmap, ADT7X10_CONFIG, data->config);
if (ret)
return ret;
- }
- dev_dbg(dev, "Config %02x\n", data->config);
-
- ret = adt7x10_fill_cache(dev);
- if (ret)
- goto exit_restore;
-
- /* Register sysfs hooks */
- ret = sysfs_create_group(&dev->kobj, &adt7x10_group);
- if (ret)
- goto exit_restore;
-
- /*
- * The I2C device will already have it's own 'name' attribute, but for
- * the SPI device we need to register it. name will only be non NULL if
- * the device doesn't register the 'name' attribute on its own.
- */
- if (name) {
- ret = device_create_file(dev, &dev_attr_name);
+ ret = devm_add_action_or_reset(dev, adt7x10_restore_config, data);
if (ret)
- goto exit_remove;
+ return ret;
}
+ dev_dbg(dev, "Config %02x\n", data->config);
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto exit_remove_name;
- }
+ hdev = devm_hwmon_device_register_with_info(dev, name, data,
+ &adt7x10_chip_info, NULL);
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
if (irq > 0) {
- ret = request_threaded_irq(irq, NULL, adt7x10_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), dev);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ adt7x10_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(dev), hdev);
if (ret)
- goto exit_hwmon_device_unregister;
+ return ret;
}
return 0;
-
-exit_hwmon_device_unregister:
- hwmon_device_unregister(data->hwmon_dev);
-exit_remove_name:
- if (name)
- device_remove_file(dev, &dev_attr_name);
-exit_remove:
- sysfs_remove_group(&dev->kobj, &adt7x10_group);
-exit_restore:
- adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
- return ret;
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-void adt7x10_remove(struct device *dev, int irq)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
-
- if (irq > 0)
- free_irq(irq, dev);
-
- hwmon_device_unregister(data->hwmon_dev);
- if (data->name)
- device_remove_file(dev, &dev_attr_name);
- sysfs_remove_group(&dev->kobj, &adt7x10_group);
- if (data->oldconfig != data->config)
- adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
-}
-EXPORT_SYMBOL_GPL(adt7x10_remove);
-
#ifdef CONFIG_PM_SLEEP
static int adt7x10_suspend(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return adt7x10_write_byte(dev, ADT7X10_CONFIG,
- data->config | ADT7X10_PD);
+ return regmap_write(data->regmap, ADT7X10_CONFIG,
+ data->config | ADT7X10_PD);
}
static int adt7x10_resume(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return adt7x10_write_byte(dev, ADT7X10_CONFIG, data->config);
+ return regmap_write(data->regmap, ADT7X10_CONFIG, data->config);
}
SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index a1ae682eb32e..ba22c32c8355 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -17,16 +17,8 @@
struct device;
-struct adt7x10_ops {
- int (*read_byte)(struct device *, u8 reg);
- int (*write_byte)(struct device *, u8 reg, u8 data);
- int (*read_word)(struct device *, u8 reg);
- int (*write_word)(struct device *, u8 reg, u16 data);
-};
-
int adt7x10_probe(struct device *dev, const char *name, int irq,
- const struct adt7x10_ops *ops);
-void adt7x10_remove(struct device *dev, int irq);
+ struct regmap *regmap);
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
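The adt7x10 rewrite above also moves from hand-rolled sysfs attributes to devm_hwmon_device_register_with_info(), where visibility, reads and writes are dispatched through a chip_info table. The registration skeleton, reduced to a sketch (names prefixed my_ are placeholders):

    static const struct hwmon_channel_info *my_info[] = {
            HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
            NULL,
    };

    static const struct hwmon_ops my_hwmon_ops = {
            .is_visible = my_is_visible,
            .read       = my_read,
            .write      = my_write,
    };

    static const struct hwmon_chip_info my_chip_info = {
            .ops  = &my_hwmon_ops,
            .info = my_info,
    };

    hdev = devm_hwmon_device_register_with_info(dev, "my_chip", data,
                                                &my_chip_info, NULL);
    if (IS_ERR(hdev))
            return PTR_ERR(hdev);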
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index fb9341a53051..525809cf7c95 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,32 +1,41 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * hwmon driver for Aquacomputer D5 Next watercooling pump
+ * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk 360)
*
- * The D5 Next sends HID reports (with ID 0x01) every second to report sensor values
- * (coolant temperature, pump and fan speed, voltage, current and power). It responds to
- * Get_Report requests, but returns a dummy value of no use.
+ * Aquacomputer devices send HID reports (with ID 0x01) every second to report
+ * sensor values.
*
* Copyright 2021 Aleksa Savic <savicaleksa83@gmail.com>
*/
-#include <asm/unaligned.h>
#include <linux/debugfs.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <asm/unaligned.h>
-#define DRIVER_NAME "aquacomputer-d5next"
+#define USB_VENDOR_ID_AQUACOMPUTER 0x0c70
+#define USB_PRODUCT_ID_D5NEXT 0xf00e
+#define USB_PRODUCT_ID_FARBWERK360 0xf010
-#define D5NEXT_STATUS_REPORT_ID 0x01
-#define D5NEXT_STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */
+enum kinds { d5next, farbwerk360 };
-/* Register offsets for the D5 Next pump */
+static const char *const aqc_device_names[] = {
+ [d5next] = "d5next",
+ [farbwerk360] = "farbwerk360"
+};
-#define D5NEXT_SERIAL_FIRST_PART 3
-#define D5NEXT_SERIAL_SECOND_PART 5
-#define D5NEXT_FIRMWARE_VERSION 13
+#define DRIVER_NAME "aquacomputer_d5next"
+
+#define STATUS_REPORT_ID 0x01
+#define STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */
+#define SERIAL_FIRST_PART 3
+#define SERIAL_SECOND_PART 5
+#define FIRMWARE_VERSION 13
+
+/* Register offsets for the D5 Next pump */
#define D5NEXT_POWER_CYCLES 24
#define D5NEXT_COOLANT_TEMP 87
@@ -44,76 +53,118 @@
#define D5NEXT_PUMP_CURRENT 112
#define D5NEXT_FAN_CURRENT 99
-/* Labels for provided values */
+/* Register offsets for the Farbwerk 360 RGB controller */
+#define FARBWERK360_NUM_SENSORS 4
+#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_SENSOR_SIZE 0x02
+#define FARBWERK360_SENSOR_DISCONNECTED 0x7FFF
-#define L_COOLANT_TEMP "Coolant temp"
+/* Labels for D5 Next */
+#define L_D5NEXT_COOLANT_TEMP "Coolant temp"
-#define L_PUMP_SPEED "Pump speed"
-#define L_FAN_SPEED "Fan speed"
-
-#define L_PUMP_POWER "Pump power"
-#define L_FAN_POWER "Fan power"
-
-#define L_PUMP_VOLTAGE "Pump voltage"
-#define L_FAN_VOLTAGE "Fan voltage"
-#define L_5V_VOLTAGE "+5V voltage"
-
-#define L_PUMP_CURRENT "Pump current"
-#define L_FAN_CURRENT "Fan current"
+static const char *const label_d5next_speeds[] = {
+ "Pump speed",
+ "Fan speed"
+};
-static const char *const label_speeds[] = {
- L_PUMP_SPEED,
- L_FAN_SPEED,
+static const char *const label_d5next_power[] = {
+ "Pump power",
+ "Fan power"
};
-static const char *const label_power[] = {
- L_PUMP_POWER,
- L_FAN_POWER,
+static const char *const label_d5next_voltages[] = {
+ "Pump voltage",
+ "Fan voltage",
+ "+5V voltage"
};
-static const char *const label_voltages[] = {
- L_PUMP_VOLTAGE,
- L_FAN_VOLTAGE,
- L_5V_VOLTAGE,
+static const char *const label_d5next_current[] = {
+ "Pump current",
+ "Fan current"
};
-static const char *const label_current[] = {
- L_PUMP_CURRENT,
- L_FAN_CURRENT,
+/* Labels for Farbwerk 360 temperature sensors */
+static const char *const label_temp_sensors[] = {
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4"
};
-struct d5next_data {
+struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
struct dentry *debugfs;
- s32 temp_input;
+ enum kinds kind;
+ const char *name;
+
+ /* General info, same across all devices */
+ u32 serial_number[2];
+ u16 firmware_version;
+
+ /* D5 Next specific - how many times the device was powered on */
+ u32 power_cycles;
+
+ /* Sensor values */
+ s32 temp_input[4];
u16 speed_input[2];
u32 power_input[2];
u16 voltage_input[3];
u16 current_input[2];
- u32 serial_number[2];
- u16 firmware_version;
- u32 power_cycles; /* How many times the device was powered on */
+
unsigned long updated;
};
-static umode_t d5next_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
- int channel)
+static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
{
- return 0444;
+ const struct aqc_data *priv = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (priv->kind) {
+ case d5next:
+ if (channel == 0)
+ return 0444;
+ break;
+ case farbwerk360:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ case hwmon_power:
+ case hwmon_in:
+ case hwmon_curr:
+ switch (priv->kind) {
+ case d5next:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
-static int d5next_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
- long *val)
+static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
{
- struct d5next_data *priv = dev_get_drvdata(dev);
+ struct aqc_data *priv = dev_get_drvdata(dev);
- if (time_after(jiffies, priv->updated + D5NEXT_STATUS_UPDATE_INTERVAL))
+ if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL))
return -ENODATA;
switch (type) {
case hwmon_temp:
- *val = priv->temp_input;
+ if (priv->temp_input[channel] == -ENODATA)
+ return -ENODATA;
+
+ *val = priv->temp_input[channel];
break;
case hwmon_fan:
*val = priv->speed_input[channel];
@@ -134,24 +185,59 @@ static int d5next_read(struct device *dev, enum hwmon_sensor_types type, u32 att
return 0;
}
-static int d5next_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
- int channel, const char **str)
+static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
{
+ struct aqc_data *priv = dev_get_drvdata(dev);
+
switch (type) {
case hwmon_temp:
- *str = L_COOLANT_TEMP;
+ switch (priv->kind) {
+ case d5next:
+ *str = L_D5NEXT_COOLANT_TEMP;
+ break;
+ case farbwerk360:
+ *str = label_temp_sensors[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_fan:
- *str = label_speeds[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_speeds[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_power:
- *str = label_power[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_power[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_in:
- *str = label_voltages[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_voltages[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_curr:
- *str = label_current[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_current[channel];
+ break;
+ default:
+ break;
+ }
break;
default:
return -EOPNOTSUPP;
@@ -160,60 +246,89 @@ static int d5next_read_string(struct device *dev, enum hwmon_sensor_types type,
return 0;
}
-static const struct hwmon_ops d5next_hwmon_ops = {
- .is_visible = d5next_is_visible,
- .read = d5next_read,
- .read_string = d5next_read_string,
+static const struct hwmon_ops aqc_hwmon_ops = {
+ .is_visible = aqc_is_visible,
+ .read = aqc_read,
+ .read_string = aqc_read_string,
};
-static const struct hwmon_channel_info *d5next_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+static const struct hwmon_channel_info *aqc_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_LABEL,
+ HWMON_P_INPUT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
HWMON_I_INPUT | HWMON_I_LABEL),
- HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
NULL
};
-static const struct hwmon_chip_info d5next_chip_info = {
- .ops = &d5next_hwmon_ops,
- .info = d5next_info,
+static const struct hwmon_chip_info aqc_chip_info = {
+ .ops = &aqc_hwmon_ops,
+ .info = aqc_info,
};
-static int d5next_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size)
+static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data,
+ int size)
{
- struct d5next_data *priv;
+ int i, sensor_value;
+ struct aqc_data *priv;
- if (report->id != D5NEXT_STATUS_REPORT_ID)
+ if (report->id != STATUS_REPORT_ID)
return 0;
priv = hid_get_drvdata(hdev);
/* Info provided with every report */
-
- priv->serial_number[0] = get_unaligned_be16(data + D5NEXT_SERIAL_FIRST_PART);
- priv->serial_number[1] = get_unaligned_be16(data + D5NEXT_SERIAL_SECOND_PART);
-
- priv->firmware_version = get_unaligned_be16(data + D5NEXT_FIRMWARE_VERSION);
- priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES);
+ priv->serial_number[0] = get_unaligned_be16(data + SERIAL_FIRST_PART);
+ priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
+ priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
/* Sensor readings */
+ switch (priv->kind) {
+ case d5next:
+ priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES);
- priv->temp_input = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10;
+ priv->temp_input[0] = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10;
- priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED);
- priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED);
+ priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED);
+ priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED);
- priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000;
- priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000;
+ priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000;
+ priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000;
- priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10;
- priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10;
- priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
+ priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10;
+ priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10;
+ priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
- priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT);
- priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT);
+ priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT);
+ priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT);
+ break;
+ case farbwerk360:
+ /* Temperature sensor readings */
+ for (i = 0; i < FARBWERK360_NUM_SENSORS; i++) {
+ sensor_value = get_unaligned_be16(data + FARBWERK360_SENSOR_START +
+ i * FARBWERK360_SENSOR_SIZE);
+ if (sensor_value == FARBWERK360_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ }
+ break;
+ default:
+ break;
+ }
priv->updated = jiffies;
@@ -224,7 +339,7 @@ static int d5next_raw_event(struct hid_device *hdev, struct hid_report *report,
static int serial_number_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%05u-%05u\n", priv->serial_number[0], priv->serial_number[1]);
@@ -234,7 +349,7 @@ DEFINE_SHOW_ATTRIBUTE(serial_number);
static int firmware_version_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%u\n", priv->firmware_version);
@@ -244,7 +359,7 @@ DEFINE_SHOW_ATTRIBUTE(firmware_version);
static int power_cycles_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%u\n", priv->power_cycles);
@@ -252,29 +367,32 @@ static int power_cycles_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(power_cycles);
-static void d5next_debugfs_init(struct d5next_data *priv)
+static void aqc_debugfs_init(struct aqc_data *priv)
{
- char name[32];
+ char name[64];
- scnprintf(name, sizeof(name), "%s-%s", DRIVER_NAME, dev_name(&priv->hdev->dev));
+ scnprintf(name, sizeof(name), "%s_%s-%s", "aquacomputer", priv->name,
+ dev_name(&priv->hdev->dev));
priv->debugfs = debugfs_create_dir(name, NULL);
debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops);
debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
- debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
+
+ if (priv->kind == d5next)
+ debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
}
#else
-static void d5next_debugfs_init(struct d5next_data *priv)
+static void aqc_debugfs_init(struct aqc_data *priv)
{
}
#endif
-static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct d5next_data *priv;
+ struct aqc_data *priv;
int ret;
priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -284,7 +402,7 @@ static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->hdev = hdev;
hid_set_drvdata(hdev, priv);
- priv->updated = jiffies - D5NEXT_STATUS_UPDATE_INTERVAL;
+ priv->updated = jiffies - STATUS_UPDATE_INTERVAL;
ret = hid_parse(hdev);
if (ret)
@@ -298,15 +416,28 @@ static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto fail_and_stop;
- priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "d5next", priv,
- &d5next_chip_info, NULL);
+ switch (hdev->product) {
+ case USB_PRODUCT_ID_D5NEXT:
+ priv->kind = d5next;
+ break;
+ case USB_PRODUCT_ID_FARBWERK360:
+ priv->kind = farbwerk360;
+ break;
+ default:
+ break;
+ }
+
+ priv->name = aqc_device_names[priv->kind];
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, priv->name, priv,
+ &aqc_chip_info, NULL);
if (IS_ERR(priv->hwmon_dev)) {
ret = PTR_ERR(priv->hwmon_dev);
goto fail_and_close;
}
- d5next_debugfs_init(priv);
+ aqc_debugfs_init(priv);
return 0;
@@ -317,9 +448,9 @@ fail_and_stop:
return ret;
}
-static void d5next_remove(struct hid_device *hdev)
+static void aqc_remove(struct hid_device *hdev)
{
- struct d5next_data *priv = hid_get_drvdata(hdev);
+ struct aqc_data *priv = hid_get_drvdata(hdev);
debugfs_remove_recursive(priv->debugfs);
hwmon_device_unregister(priv->hwmon_dev);
@@ -328,36 +459,36 @@ static void d5next_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static const struct hid_device_id d5next_table[] = {
- { HID_USB_DEVICE(0x0c70, 0xf00e) }, /* Aquacomputer D5 Next */
- {},
+static const struct hid_device_id aqc_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_D5NEXT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
+ { }
};
-MODULE_DEVICE_TABLE(hid, d5next_table);
+MODULE_DEVICE_TABLE(hid, aqc_table);
-static struct hid_driver d5next_driver = {
+static struct hid_driver aqc_driver = {
.name = DRIVER_NAME,
- .id_table = d5next_table,
- .probe = d5next_probe,
- .remove = d5next_remove,
- .raw_event = d5next_raw_event,
+ .id_table = aqc_table,
+ .probe = aqc_probe,
+ .remove = aqc_remove,
+ .raw_event = aqc_raw_event,
};
-static int __init d5next_init(void)
+static int __init aqc_init(void)
{
- return hid_register_driver(&d5next_driver);
+ return hid_register_driver(&aqc_driver);
}
-static void __exit d5next_exit(void)
+static void __exit aqc_exit(void)
{
- hid_unregister_driver(&d5next_driver);
+ hid_unregister_driver(&aqc_driver);
}
/* Request to initialize after the HID bus to ensure it's not being loaded before */
-
-late_initcall(d5next_init);
-module_exit(d5next_exit);
+late_initcall(aqc_init);
+module_exit(aqc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>");
-MODULE_DESCRIPTION("Hwmon driver for Aquacomputer D5 Next pump");
+MODULE_DESCRIPTION("Hwmon driver for Aquacomputer devices");
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
new file mode 100644
index 000000000000..b5cf0136360c
--- /dev/null
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON driver for ASUS motherboards that publish some sensor values
+ * via the embedded controller registers.
+ *
+ * Copyright (C) 2021 Eugene Shalygin <eugene.shalygin@gmail.com>
+ *
 + *
+ * EC provides:
+ * - Chipset temperature
+ * - CPU temperature
+ * - Motherboard temperature
+ * - T_Sensor temperature
+ * - VRM temperature
+ * - Water In temperature
+ * - Water Out temperature
+ * - CPU Optional fan RPM
+ * - Chipset fan RPM
+ * - VRM Heat Sink fan RPM
+ * - Water Flow fan RPM
+ * - CPU current
+ * - CPU core voltage
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/dev_printk.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+static char *mutex_path_override;
+
+/* Writing to this EC register switches EC bank */
+#define ASUS_EC_BANK_REGISTER 0xff
+#define SENSOR_LABEL_LEN 16
+
+/*
+ * Arbitrarily chosen maximum allowed bank number. Required for sorting
+ * banks; currently overkill with just 2 banks used at most, but kept a
+ * bit higher for the sake of alignment.
+ */
+#define ASUS_EC_MAX_BANK 3
+
+#define ACPI_LOCK_DELAY_MS 500
+
+/* ACPI mutex for locking access to the EC for the firmware */
+#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"
+
+/* The vendor name has two spelling variants; the DMI table below matches the upper-case one */
+#define VENDOR_ASUS_UPPER_CASE "ASUSTeK COMPUTER INC."
+
+typedef union {
+ u32 value;
+ struct {
+ u8 index;
+ u8 bank;
+ u8 size;
+ u8 dummy;
+ } components;
+} sensor_address;
+
+#define MAKE_SENSOR_ADDRESS(size, bank, index) { \
+ .value = (size << 16) + (bank << 8) + index \
+ }
+
+static u32 hwmon_attributes[hwmon_max] = {
+ [hwmon_chip] = HWMON_C_REGISTER_TZ,
+ [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
+ [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
+ [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
+ [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
+};
+
+struct ec_sensor_info {
+ char label[SENSOR_LABEL_LEN];
+ enum hwmon_sensor_types type;
+ sensor_address addr;
+};
+
+#define EC_SENSOR(sensor_label, sensor_type, size, bank, index) { \
+ .label = sensor_label, .type = sensor_type, \
+ .addr = MAKE_SENSOR_ADDRESS(size, bank, index), \
+ }
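The sensor_address union above packs size, bank and register index into one 32-bit value. A standalone illustration of that packing, using the 2-byte CPU core voltage entry (bank 0x00, index 0xa2) from the known_ec_sensors table further down; the shifts mirror the .components fields on a little-endian layout.

/* Standalone illustration of MAKE_SENSOR_ADDRESS: value = (size << 16) + (bank << 8) + index */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = (2u << 16) + (0x00u << 8) + 0xa2u;	/* CPU Core voltage entry */

	printf("size=%u bank=0x%02x index=0x%02x\n",
	       (addr >> 16) & 0xff, (addr >> 8) & 0xff, addr & 0xff);
	return 0;
}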
+
+enum ec_sensors {
+ /* chipset temperature [℃] */
+ ec_sensor_temp_chipset,
+ /* CPU temperature [℃] */
+ ec_sensor_temp_cpu,
+ /* motherboard temperature [℃] */
+ ec_sensor_temp_mb,
+ /* "T_Sensor" temperature sensor reading [℃] */
+ ec_sensor_temp_t_sensor,
+ /* VRM temperature [℃] */
+ ec_sensor_temp_vrm,
+ /* CPU Core voltage [mV] */
+ ec_sensor_in_cpu_core,
+ /* CPU_Opt fan [RPM] */
+ ec_sensor_fan_cpu_opt,
+ /* VRM heat sink fan [RPM] */
+ ec_sensor_fan_vrm_hs,
+ /* Chipset fan [RPM] */
+ ec_sensor_fan_chipset,
+ /* Water flow sensor reading [RPM] */
+ ec_sensor_fan_water_flow,
+ /* CPU current [A] */
+ ec_sensor_curr_cpu,
+ /* "Water_In" temperature sensor reading [℃] */
+ ec_sensor_temp_water_in,
+ /* "Water_Out" temperature sensor reading [℃] */
+ ec_sensor_temp_water_out,
+};
+
+#define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset)
+#define SENSOR_TEMP_CPU BIT(ec_sensor_temp_cpu)
+#define SENSOR_TEMP_MB BIT(ec_sensor_temp_mb)
+#define SENSOR_TEMP_T_SENSOR BIT(ec_sensor_temp_t_sensor)
+#define SENSOR_TEMP_VRM BIT(ec_sensor_temp_vrm)
+#define SENSOR_IN_CPU_CORE BIT(ec_sensor_in_cpu_core)
+#define SENSOR_FAN_CPU_OPT BIT(ec_sensor_fan_cpu_opt)
+#define SENSOR_FAN_VRM_HS BIT(ec_sensor_fan_vrm_hs)
+#define SENSOR_FAN_CHIPSET BIT(ec_sensor_fan_chipset)
+#define SENSOR_FAN_WATER_FLOW BIT(ec_sensor_fan_water_flow)
+#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
+#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
+#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
+
+/* All the known sensors for ASUS EC controllers */
+static const struct ec_sensor_info known_ec_sensors[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_in_cpu_core] =
+ EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_chipset] =
+ EC_SENSOR("Chipset", hwmon_fan, 2, 0x00, 0xb4),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_curr_cpu] = EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+};
+
+/* Shortcuts for common combinations */
+#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
+ (SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
+#define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT)
+
+#define DMI_EXACT_MATCH_BOARD(vendor, name, sensors) { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, vendor), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)(sensors), \
+}
+
+static const struct dmi_system_id asus_ec_dmi_table[] __initconst = {
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "PRIME X570-PRO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "Pro WS X570-ACE",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII DARK HERO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII FORMULA",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG CROSSHAIR VIII HERO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII HERO (WI-FI)",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII IMPACT",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX B550-E GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CPU_OPT),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX B550-I GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_VRM_HS |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-E GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-F GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-I GAMING",
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ {}
+};
+
+struct ec_sensor {
+ unsigned int info_index;
+ s32 cached_value;
+};
+
+struct ec_sensors_data {
+ unsigned long board_sensors;
+ struct ec_sensor *sensors;
+ /* EC registers to read from */
+ u16 *registers;
+ u8 *read_buffer;
+ /* sorted list of unique register banks */
+ u8 banks[ASUS_EC_MAX_BANK + 1];
+ /* in jiffies */
+ unsigned long last_updated;
+ acpi_handle aml_mutex;
+ /* number of board EC sensors */
+ u8 nr_sensors;
+ /*
+ * number of EC registers to read
+ * (sensor might span more than 1 register)
+ */
+ u8 nr_registers;
+ /* number of unique register banks */
+ u8 nr_banks;
+};
+
+static u8 register_bank(u16 reg)
+{
+ return reg >> 8;
+}
+
+static u8 register_index(u16 reg)
+{
+ return reg & 0x00ff;
+}
+
+static bool is_sensor_data_signed(const struct ec_sensor_info *si)
+{
+ /*
+ * guessed from WMI functions in DSDT code for boards
+ * of the X470 generation
+ */
+ return si->type == hwmon_temp;
+}
+
+static const struct ec_sensor_info *
+get_sensor_info(const struct ec_sensors_data *state, int index)
+{
+ return &known_ec_sensors[state->sensors[index].info_index];
+}
+
+static int find_ec_sensor_index(const struct ec_sensors_data *ec,
+ enum hwmon_sensor_types type, int channel)
+{
+ unsigned int i;
+
+ for (i = 0; i < ec->nr_sensors; i++) {
+ if (get_sensor_info(ec, i)->type == type) {
+ if (channel == 0)
+ return i;
+ channel--;
+ }
+ }
+ return -ENOENT;
+}
+
+static int __init bank_compare(const void *a, const void *b)
+{
+ return *((const s8 *)a) - *((const s8 *)b);
+}
+
+static int __init board_sensors_count(unsigned long sensors)
+{
+ return hweight_long(sensors);
+}
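Each DMI entry above stores its sensor set as a bitmask in driver_data, and board_sensors_count() is simply a population count over that mask. A standalone illustration (the mask values mirror the BIT() positions of the enum above; the board mask is an example, not a real entry):

/* Standalone sketch of the driver_data bitmask and its population count */
#include <stdio.h>

#define SENSOR_TEMP_CHIPSET (1ul << 0)
#define SENSOR_TEMP_CPU     (1ul << 1)
#define SENSOR_TEMP_MB      (1ul << 2)
#define SENSOR_FAN_CHIPSET  (1ul << 8)

int main(void)
{
	unsigned long board = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU |
			      SENSOR_TEMP_MB | SENSOR_FAN_CHIPSET;

	/* board_sensors_count() is just hweight_long() over this mask */
	printf("%d sensors\n", __builtin_popcountl(board));
	return 0;
}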
+
+static void __init setup_sensor_data(struct ec_sensors_data *ec)
+{
+ struct ec_sensor *s = ec->sensors;
+ bool bank_found;
+ int i, j;
+ u8 bank;
+
+ ec->nr_banks = 0;
+ ec->nr_registers = 0;
+
+ for_each_set_bit(i, &ec->board_sensors,
+ BITS_PER_TYPE(ec->board_sensors)) {
+ s->info_index = i;
+ s->cached_value = 0;
+ ec->nr_registers +=
+ known_ec_sensors[s->info_index].addr.components.size;
+ bank_found = false;
+ bank = known_ec_sensors[s->info_index].addr.components.bank;
+ for (j = 0; j < ec->nr_banks; j++) {
+ if (ec->banks[j] == bank) {
+ bank_found = true;
+ break;
+ }
+ }
+ if (!bank_found) {
+ ec->banks[ec->nr_banks++] = bank;
+ }
+ s++;
+ }
+ sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
+}
+
+static void __init fill_ec_registers(struct ec_sensors_data *ec)
+{
+ const struct ec_sensor_info *si;
+ unsigned int i, j, register_idx = 0;
+
+ for (i = 0; i < ec->nr_sensors; ++i) {
+ si = get_sensor_info(ec, i);
+ for (j = 0; j < si->addr.components.size; ++j, ++register_idx) {
+ ec->registers[register_idx] =
+ (si->addr.components.bank << 8) +
+ si->addr.components.index + j;
+ }
+ }
+}
+
+static acpi_handle __init asus_hw_access_mutex(struct device *dev)
+{
+ const char *mutex_path;
+ acpi_handle res;
+ int status;
+
+ mutex_path = mutex_path_override ?
+ mutex_path_override : ASUS_HW_ACCESS_MUTEX_ASMX;
+
+ status = acpi_get_handle(NULL, (acpi_string)mutex_path, &res);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev,
+ "Could not get hardware access guard mutex '%s': error %d",
+ mutex_path, status);
+ return NULL;
+ }
+ return res;
+}
+
+static int asus_ec_bank_switch(u8 bank, u8 *old)
+{
+ int status = 0;
+
+ if (old) {
+ status = ec_read(ASUS_EC_BANK_REGISTER, old);
+ }
+ if (status || (old && (*old == bank)))
+ return status;
+ return ec_write(ASUS_EC_BANK_REGISTER, bank);
+}
+
+static int asus_ec_block_read(const struct device *dev,
+ struct ec_sensors_data *ec)
+{
+ int ireg, ibank, status;
+ u8 bank, reg_bank, prev_bank;
+
+ bank = 0;
+ status = asus_ec_bank_switch(bank, &prev_bank);
+ if (status) {
+ dev_warn(dev, "EC bank switch failed");
+ return status;
+ }
+
+ if (prev_bank) {
+ /* oops... somebody else is working with the EC too */
+ dev_warn(dev,
+ "Concurrent access to the ACPI EC detected.\nRace condition possible.");
+ }
+
+ /* read registers minimizing bank switches. */
+ for (ibank = 0; ibank < ec->nr_banks; ibank++) {
+ if (bank != ec->banks[ibank]) {
+ bank = ec->banks[ibank];
+ if (asus_ec_bank_switch(bank, NULL)) {
+ dev_warn(dev, "EC bank switch to %d failed",
+ bank);
+ break;
+ }
+ }
+ for (ireg = 0; ireg < ec->nr_registers; ireg++) {
+ reg_bank = register_bank(ec->registers[ireg]);
+ if (reg_bank < bank) {
+ continue;
+ }
+ ec_read(register_index(ec->registers[ireg]),
+ ec->read_buffer + ireg);
+ }
+ }
+
+ status = asus_ec_bank_switch(prev_bank, NULL);
+ return status;
+}
+
+static inline s32 get_sensor_value(const struct ec_sensor_info *si, u8 *data)
+{
+ if (is_sensor_data_signed(si)) {
+ switch (si->addr.components.size) {
+ case 1:
+ return (s8)*data;
+ case 2:
+ return (s16)get_unaligned_be16(data);
+ case 4:
+ return (s32)get_unaligned_be32(data);
+ default:
+ return 0;
+ }
+ } else {
+ switch (si->addr.components.size) {
+ case 1:
+ return *data;
+ case 2:
+ return get_unaligned_be16(data);
+ case 4:
+ return get_unaligned_be32(data);
+ default:
+ return 0;
+ }
+ }
+}
+
+static void update_sensor_values(struct ec_sensors_data *ec, u8 *data)
+{
+ const struct ec_sensor_info *si;
+ struct ec_sensor *s;
+
+ for (s = ec->sensors; s != ec->sensors + ec->nr_sensors; s++) {
+ si = &known_ec_sensors[s->info_index];
+ s->cached_value = get_sensor_value(si, data);
+ data += si->addr.components.size;
+ }
+}
+
+static int update_ec_sensors(const struct device *dev,
+ struct ec_sensors_data *ec)
+{
+ int status;
+
+ /*
+ * ASUS DSDT does not specify that access to the EC has to be guarded,
+ * but firmware does access it via ACPI
+ */
+ if (ACPI_FAILURE(acpi_acquire_mutex(ec->aml_mutex, NULL,
+ ACPI_LOCK_DELAY_MS))) {
+ dev_err(dev, "Failed to acquire AML mutex");
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ status = asus_ec_block_read(dev, ec);
+
+ if (!status) {
+ update_sensor_values(ec, ec->read_buffer);
+ }
+ if (ACPI_FAILURE(acpi_release_mutex(ec->aml_mutex, NULL))) {
+ dev_err(dev, "Failed to release AML mutex");
+ }
+cleanup:
+ return status;
+}
+
+static long scale_sensor_value(s32 value, int data_type)
+{
+ switch (data_type) {
+ case hwmon_curr:
+ case hwmon_temp:
+ return value * MILLI;
+ default:
+ return value;
+ }
+}
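Putting get_sensor_value() and scale_sensor_value() together for a one-byte temperature: the raw EC byte is interpreted as signed and then reported in millidegrees (MILLI is 1000). A standalone sketch with an invented register value:

/* Standalone sketch: a one-byte EC temperature of 0x2a becomes 42000 millidegrees */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t raw = 0x2a;			/* hypothetical EC register content */
	long scaled = (int8_t)raw * 1000;	/* hwmon_temp is reported in millidegrees */

	printf("%ld\n", scaled);		/* prints 42000 */
	return 0;
}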
+
+static int get_cached_value_or_update(const struct device *dev,
+ int sensor_index,
+ struct ec_sensors_data *state, s32 *value)
+{
+ if (time_after(jiffies, state->last_updated + HZ)) {
+ if (update_ec_sensors(dev, state)) {
+ dev_err(dev, "update_ec_sensors() failure\n");
+ return -EIO;
+ }
+
+ state->last_updated = jiffies;
+ }
+
+ *value = state->sensors[sensor_index].cached_value;
+ return 0;
+}
+
+/*
+ * Now follow the functions that implement the hwmon interface
+ */
+
+static int asus_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+ s32 value = 0;
+
+ struct ec_sensors_data *state = dev_get_drvdata(dev);
+ int sidx = find_ec_sensor_index(state, type, channel);
+
+ if (sidx < 0) {
+ return sidx;
+ }
+
+ ret = get_cached_value_or_update(dev, sidx, state, &value);
+ if (!ret) {
+ *val = scale_sensor_value(value,
+ get_sensor_info(state, sidx)->type);
+ }
+
+ return ret;
+}
+
+static int asus_ec_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct ec_sensors_data *state = dev_get_drvdata(dev);
+ int sensor_index = find_ec_sensor_index(state, type, channel);
+ *str = get_sensor_info(state, sensor_index)->label;
+
+ return 0;
+}
+
+static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct ec_sensors_data *state = drvdata;
+
+ return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
+}
+
+static int __init
+asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
+ struct device *dev, int num,
+ enum hwmon_sensor_types type, u32 config)
+{
+ int i;
+ u32 *cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
+
+ if (!cfg)
+ return -ENOMEM;
+
+ asus_ec_hwmon_chan->type = type;
+ asus_ec_hwmon_chan->config = cfg;
+ for (i = 0; i < num; i++, cfg++)
+ *cfg = config;
+
+ return 0;
+}
+
+static const struct hwmon_ops asus_ec_hwmon_ops = {
+ .is_visible = asus_ec_hwmon_is_visible,
+ .read = asus_ec_hwmon_read,
+ .read_string = asus_ec_hwmon_read_string,
+};
+
+static struct hwmon_chip_info asus_ec_chip_info = {
+ .ops = &asus_ec_hwmon_ops,
+};
+
+static unsigned long __init get_board_sensors(void)
+{
+ const struct dmi_system_id *dmi_entry =
+ dmi_first_match(asus_ec_dmi_table);
+
+ return dmi_entry ? (unsigned long)dmi_entry->driver_data : 0;
+}
+
+static int __init asus_ec_probe(struct platform_device *pdev)
+{
+ const struct hwmon_channel_info **ptr_asus_ec_ci;
+ int nr_count[hwmon_max] = { 0 }, nr_types = 0;
+ struct hwmon_channel_info *asus_ec_hwmon_chan;
+ const struct hwmon_chip_info *chip_info;
+ struct device *dev = &pdev->dev;
+ struct ec_sensors_data *ec_data;
+ const struct ec_sensor_info *si;
+ enum hwmon_sensor_types type;
+ unsigned long board_sensors;
+ struct device *hwdev;
+ unsigned int i;
+
+ board_sensors = get_board_sensors();
+ if (!board_sensors)
+ return -ENODEV;
+
+ ec_data = devm_kzalloc(dev, sizeof(struct ec_sensors_data),
+ GFP_KERNEL);
+ if (!ec_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ec_data);
+ ec_data->board_sensors = board_sensors;
+ ec_data->nr_sensors = board_sensors_count(ec_data->board_sensors);
+ ec_data->sensors = devm_kcalloc(dev, ec_data->nr_sensors,
+ sizeof(struct ec_sensor), GFP_KERNEL);
+
+ setup_sensor_data(ec_data);
+ ec_data->registers = devm_kcalloc(dev, ec_data->nr_registers,
+ sizeof(u16), GFP_KERNEL);
+ ec_data->read_buffer = devm_kcalloc(dev, ec_data->nr_registers,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!ec_data->registers || !ec_data->read_buffer)
+ return -ENOMEM;
+
+ fill_ec_registers(ec_data);
+
+ ec_data->aml_mutex = asus_hw_access_mutex(dev);
+
+ for (i = 0; i < ec_data->nr_sensors; ++i) {
+ si = get_sensor_info(ec_data, i);
+ if (!nr_count[si->type])
+ ++nr_types;
+ ++nr_count[si->type];
+ }
+
+ if (nr_count[hwmon_temp])
+ nr_count[hwmon_chip]++, nr_types++;
+
+ asus_ec_hwmon_chan = devm_kcalloc(
+ dev, nr_types, sizeof(*asus_ec_hwmon_chan), GFP_KERNEL);
+ if (!asus_ec_hwmon_chan)
+ return -ENOMEM;
+
+ ptr_asus_ec_ci = devm_kcalloc(dev, nr_types + 1,
+ sizeof(*ptr_asus_ec_ci), GFP_KERNEL);
+ if (!ptr_asus_ec_ci)
+ return -ENOMEM;
+
+ asus_ec_chip_info.info = ptr_asus_ec_ci;
+ chip_info = &asus_ec_chip_info;
+
+ for (type = 0; type < hwmon_max; ++type) {
+ if (!nr_count[type])
+ continue;
+
+ asus_ec_hwmon_add_chan_info(asus_ec_hwmon_chan, dev,
+ nr_count[type], type,
+ hwmon_attributes[type]);
+ *ptr_asus_ec_ci++ = asus_ec_hwmon_chan++;
+ }
+
+ dev_info(dev, "board has %d EC sensors that span %d registers",
+ ec_data->nr_sensors, ec_data->nr_registers);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, "asusec",
+ ec_data, chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwdev);
+}
+
+
+static const struct acpi_device_id acpi_ec_ids[] = {
+ /* Embedded Controller Device */
+ { "PNP0C09", 0 },
+ {}
+};
+
+static struct platform_driver asus_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "asus-ec-sensors",
+ .acpi_match_table = acpi_ec_ids,
+ },
+};
+
+MODULE_DEVICE_TABLE(dmi, asus_ec_dmi_table);
+module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+
+module_param_named(mutex_path, mutex_path_override, charp, 0);
+MODULE_PARM_DESC(mutex_path,
+ "Override ACPI mutex path used to guard access to hardware");
+
+MODULE_AUTHOR("Eugene Shalygin <eugene.shalygin@gmail.com>");
+MODULE_DESCRIPTION(
+ "HWMON driver for sensors accessible via ACPI EC in ASUS motherboards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/asus_wmi_ec_sensors.c b/drivers/hwmon/asus_wmi_ec_sensors.c
index 22a1459305a7..a3a2f014dec0 100644
--- a/drivers/hwmon/asus_wmi_ec_sensors.c
+++ b/drivers/hwmon/asus_wmi_ec_sensors.c
@@ -112,7 +112,8 @@ struct asus_wmi_data {
/* boards with EC support */
static struct asus_wmi_data sensors_board_PW_X570_P = {
.known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
SENSOR_FAN_CHIPSET,
SENSOR_MAX
},
diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
index c80eee874b6c..8fdcb62ae52d 100644
--- a/drivers/hwmon/asus_wmi_sensors.c
+++ b/drivers/hwmon/asus_wmi_sensors.c
@@ -77,6 +77,7 @@ static const struct dmi_system_id asus_wmi_dmi_table[] = {
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-E GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-F GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-F GAMING II"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-I GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X399-E GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X470-F GAMING"),
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index d2092c17d993..96c4a5c45291 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -339,7 +339,8 @@ static irqreturn_t axi_fan_control_irq_handler(int irq, void *data)
ctl->update_tacho_params = true;
} else {
ctl->hw_pwm_req = false;
- sysfs_notify(&ctl->hdev->kobj, NULL, "pwm1");
+ hwmon_notify_event(ctl->hdev, hwmon_pwm,
+ hwmon_pwm_input, 0);
}
}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 9949eeb79378..84cb1ede7bc0 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -86,8 +87,8 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("i8k");
static bool force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force, "Force loading without checking for supported models");
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported models and features");
static bool ignore_dmi;
module_param(ignore_dmi, bool, 0);
@@ -250,46 +251,52 @@ static int i8k_smm(struct smm_regs *regs)
/*
* Read the fan status.
*/
-static int i8k_get_fan_status(const struct dell_smm_data *data, int fan)
+static int i8k_get_fan_status(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_FAN,
+ .ebx = fan,
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
/*
* Read the fan speed in RPM.
*/
-static int i8k_get_fan_speed(const struct dell_smm_data *data, int fan)
+static int i8k_get_fan_speed(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_SPEED,
+ .ebx = fan,
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
}
/*
* Read the fan type.
*/
-static int _i8k_get_fan_type(const struct dell_smm_data *data, int fan)
+static int _i8k_get_fan_type(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_FAN_TYPE,
+ .ebx = fan,
+ };
if (data->disallow_fan_support || data->disallow_fan_type_call)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
-static int i8k_get_fan_type(struct dell_smm_data *data, int fan)
+static int i8k_get_fan_type(struct dell_smm_data *data, u8 fan)
{
/* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
if (data->fan_type[fan] == INT_MIN)
@@ -301,14 +308,16 @@ static int i8k_get_fan_type(struct dell_smm_data *data, int fan)
/*
* Read the fan nominal rpm for specific fan speed.
*/
-static int __init i8k_get_fan_nominal_speed(const struct dell_smm_data *data, int fan, int speed)
+static int __init i8k_get_fan_nominal_speed(const struct dell_smm_data *data, u8 fan, int speed)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_NOM_SPEED,
+ .ebx = fan | (speed << 8),
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
}
@@ -329,7 +338,7 @@ static int i8k_enable_fan_auto_mode(const struct dell_smm_data *data, bool enabl
/*
* Set the fan speed (off, low, high, ...).
*/
-static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
+static int i8k_set_fan(const struct dell_smm_data *data, u8 fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
@@ -337,33 +346,35 @@ static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
return -EINVAL;
speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
- regs.ebx = (fan & 0xff) | (speed << 8);
+ regs.ebx = fan | (speed << 8);
return i8k_smm(&regs);
}
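The SMM helpers above now build EBX directly in the struct initializer; the layout itself is unchanged, fan number in the low byte and speed in the next byte. A standalone check with example values:

/* Standalone sketch of the EBX packing used by the SMM fan calls */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t fan = 1, speed = 2;

	printf("ebx = 0x%04x\n", fan | (speed << 8));	/* prints 0x0201 */
	return 0;
}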
-static int __init i8k_get_temp_type(int sensor)
+static int __init i8k_get_temp_type(u8 sensor)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP_TYPE, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_TEMP_TYPE,
+ .ebx = sensor,
+ };
- regs.ebx = sensor & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
/*
* Read the cpu temperature.
*/
-static int _i8k_get_temp(int sensor)
+static int _i8k_get_temp(u8 sensor)
{
struct smm_regs regs = {
.eax = I8K_SMM_GET_TEMP,
- .ebx = sensor & 0xff,
+ .ebx = sensor,
};
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
-static int i8k_get_temp(int sensor)
+static int i8k_get_temp(u8 sensor)
{
int temp = _i8k_get_temp(sensor);
@@ -496,6 +507,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
val = i8k_get_fan_speed(data, val);
break;
@@ -503,6 +517,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
val = i8k_get_fan_status(data, val);
break;
@@ -513,6 +530,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
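The added range checks matter because the ioctl argument arrives as an int but is now passed into helpers taking a u8; without the check an out-of-range value would be silently truncated. A standalone illustration with an invented value:

/* Standalone note: an int of 300 truncates to 44 when narrowed to u8 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int val = 300;			/* hypothetical ioctl argument */

	printf("%u\n", (uint8_t)val);	/* prints 44, not 300 */
	return 0;
}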
@@ -631,6 +651,11 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
+ /* _i8k_get_temp() is fine since we do not care about the actual value */
+ if (data->temp_type[channel] >= 0 || _i8k_get_temp(channel) >= 0)
+ return 0444;
+
+ break;
case hwmon_temp_label:
if (data->temp_type[channel] >= 0)
return 0444;
@@ -920,7 +945,8 @@ static int __init dell_smm_init_hwmon(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct device *dell_smm_hwmon_dev;
- int i, state, err;
+ int state, err;
+ u8 i;
for (i = 0; i < DELL_SMM_NO_TEMP; i++) {
data->temp_type[i] = i8k_get_temp_type(i);
@@ -1131,6 +1157,13 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
},
},
+ {
+ .ident = "Dell Inspiron 3505",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 3505"),
+ },
+ },
{ }
};
@@ -1236,7 +1269,8 @@ static int __init dell_smm_probe(struct platform_device *pdev)
{
struct dell_smm_data *data;
const struct dmi_system_id *id, *fan_control;
- int fan, ret;
+ int ret;
+ u8 fan;
data = devm_kzalloc(&pdev->dev, sizeof(struct dell_smm_data), GFP_KERNEL);
if (!data)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3501a3ead4ba..989e2c8496dd 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/thermal.h>
@@ -30,6 +31,7 @@
struct hwmon_device {
const char *name;
+ const char *label;
struct device dev;
const struct hwmon_chip_info *chip;
struct list_head tzdata;
@@ -71,17 +73,29 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(name);
+static ssize_t
+label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", to_hwmon_device(dev)->label);
+}
+static DEVICE_ATTR_RO(label);
+
static struct attribute *hwmon_dev_attrs[] = {
&dev_attr_name.attr,
+ &dev_attr_label.attr,
NULL
};
-static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+static umode_t hwmon_dev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct hwmon_device *hdev = to_hwmon_device(dev);
- if (to_hwmon_device(dev)->name == NULL)
+ if (attr == &dev_attr_name.attr && hdev->name == NULL)
+ return 0;
+
+ if (attr == &dev_attr_label.attr && hdev->label == NULL)
return 0;
return attr->mode;
@@ -89,7 +103,7 @@ static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
static const struct attribute_group hwmon_dev_attr_group = {
.attrs = hwmon_dev_attrs,
- .is_visible = hwmon_dev_name_is_visible,
+ .is_visible = hwmon_dev_attr_is_visible,
};
static const struct attribute_group *hwmon_dev_attr_groups[] = {
@@ -117,6 +131,7 @@ static void hwmon_dev_release(struct device *dev)
if (hwdev->group.attrs)
hwmon_free_attrs(hwdev->group.attrs);
kfree(hwdev->groups);
+ kfree(hwdev->label);
kfree(hwdev);
}
@@ -214,12 +229,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
- /*
- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
- * so ignore that error but forward any other error.
- */
- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
- return PTR_ERR(tzd);
+ if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) != -ENODEV)
+ return PTR_ERR(tzd);
+ dev_info(dev, "temp%d_input not attached to any thermal zone\n",
+ index + 1);
+ devm_kfree(dev, tdata);
+ return 0;
+ }
err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
if (err)
@@ -587,6 +604,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
[hwmon_pwm_enable] = "pwm%d_enable",
[hwmon_pwm_mode] = "pwm%d_mode",
[hwmon_pwm_freq] = "pwm%d_freq",
+ [hwmon_pwm_auto_channels_temp] = "pwm%d_auto_channels_temp",
};
static const char * const hwmon_intrusion_attr_templates[] = {
@@ -623,7 +641,9 @@ static const int __templates_size[] = {
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ char event[MAX_SYSFS_ATTR_NAME_LENGTH + 5];
char sattr[MAX_SYSFS_ATTR_NAME_LENGTH];
+ char *envp[] = { event, NULL };
const char * const *templates;
const char *template;
int base;
@@ -639,8 +659,9 @@ int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
base = hwmon_attr_base(type);
scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel);
+ scnprintf(event, sizeof(event), "NAME=%s", sattr);
sysfs_notify(&dev->kobj, NULL, sattr);
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
if (type == hwmon_temp)
hwmon_thermal_notify(dev, channel);
@@ -733,6 +754,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
const struct attribute_group **groups)
{
struct hwmon_device *hwdev;
+ const char *label;
struct device *hdev;
int i, err, id;
@@ -788,6 +810,18 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hdev->groups = groups;
}
+ if (dev && device_property_present(dev, "label")) {
+ err = device_property_read_string(dev, "label", &label);
+ if (err < 0)
+ goto free_hwmon;
+
+ hwdev->label = kstrdup(label, GFP_KERNEL);
+ if (hwdev->label == NULL) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+ }
+
hwdev->name = name;
hdev->class = &hwmon_class;
hdev->parent = dev;
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index d2a60de5b8de..c20a749fc7f2 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -34,6 +34,7 @@
#define LM70_CHIP_LM71 2 /* NS LM71 */
#define LM70_CHIP_LM74 3 /* NS LM74 */
#define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */
+#define LM70_CHIP_TMP125 5 /* TI TMP125 */
struct lm70 {
struct spi_device *spi;
@@ -87,6 +88,12 @@ static ssize_t temp1_input_show(struct device *dev,
* LM71:
* 14 bits of 2's complement data, discard LSB 2 bits,
* resolution 0.0312 degrees celsius.
+ *
+ * TMP125:
+ * MSB/D15 is a leading zero. D14 is the sign-bit. This is
+ * followed by 9 temperature bits (D13..D5) in 2's complement
+ * data format with a resolution of 0.25 degrees celsius per unit.
+ * LSB 5 bits (D4..D0) share the same value as D5 and get discarded.
*/
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
@@ -102,6 +109,10 @@ static ssize_t temp1_input_show(struct device *dev,
case LM70_CHIP_LM71:
val = ((int)raw / 4) * 3125 / 100;
break;
+
+ case LM70_CHIP_TMP125:
+ val = (sign_extend32(raw, 14) / 32) * 250;
+ break;
}
status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */
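The TMP125 conversion added above can be checked in isolation: sign-extend at bit 14, drop the five shared low bits, then scale by 0.25 degrees (250 millidegrees) per step. A standalone sketch with an invented raw frame (0x0ca0, i.e. 101 steps, decodes to 25.25 degrees C); sign_extend32() is re-implemented here only for the sake of a self-contained example.

/* Standalone check of the TMP125 decode: 0x0ca0 -> 25250 millidegrees */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint16_t raw = 0x0ca0;	/* made-up SPI frame */

	printf("%d mC\n", (sign_extend32(raw, 14) / 32) * 250);
	return 0;
}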
@@ -136,6 +147,10 @@ static const struct of_device_id lm70_of_ids[] = {
.data = (void *) LM70_CHIP_TMP122,
},
{
+ .compatible = "ti,tmp125",
+ .data = (void *) LM70_CHIP_TMP125,
+ },
+ {
.compatible = "ti,lm71",
.data = (void *) LM70_CHIP_LM71,
},
@@ -184,6 +199,7 @@ static const struct spi_device_id lm70_ids[] = {
{ "lm70", LM70_CHIP_LM70 },
{ "tmp121", LM70_CHIP_TMP121 },
{ "tmp122", LM70_CHIP_TMP122 },
+ { "tmp125", LM70_CHIP_TMP125 },
{ "lm71", LM70_CHIP_LM71 },
{ "lm74", LM70_CHIP_LM74 },
{ },
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 74fd7aa373a3..12370dcefa6a 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -18,15 +18,15 @@
* http://www.national.com/pf/LM/LM82.html
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
+#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
#include <linux/hwmon.h>
-#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
/*
@@ -66,35 +66,35 @@ enum chips { lm83, lm82 };
#define LM83_REG_R_TCRIT 0x42
#define LM83_REG_W_TCRIT 0x5A
-/*
- * Conversions and various macros
- * The LM83 uses signed 8-bit values with LSB = 1 degree Celsius.
- */
-
-#define TEMP_FROM_REG(val) ((val) * 1000)
-#define TEMP_TO_REG(val) ((val) <= -128000 ? -128 : \
- (val) >= 127000 ? 127 : \
- (val) < 0 ? ((val) - 500) / 1000 : \
- ((val) + 500) / 1000)
-
-static const u8 LM83_REG_R_TEMP[] = {
+static const u8 LM83_REG_TEMP[] = {
LM83_REG_R_LOCAL_TEMP,
LM83_REG_R_REMOTE1_TEMP,
LM83_REG_R_REMOTE2_TEMP,
LM83_REG_R_REMOTE3_TEMP,
+};
+
+static const u8 LM83_REG_MAX[] = {
LM83_REG_R_LOCAL_HIGH,
LM83_REG_R_REMOTE1_HIGH,
LM83_REG_R_REMOTE2_HIGH,
LM83_REG_R_REMOTE3_HIGH,
- LM83_REG_R_TCRIT,
};
-static const u8 LM83_REG_W_HIGH[] = {
- LM83_REG_W_LOCAL_HIGH,
- LM83_REG_W_REMOTE1_HIGH,
- LM83_REG_W_REMOTE2_HIGH,
- LM83_REG_W_REMOTE3_HIGH,
- LM83_REG_W_TCRIT,
+/* alarm and fault registers and bits, indexed by channel */
+static const u8 LM83_ALARM_REG[] = {
+ LM83_REG_R_STATUS1, LM83_REG_R_STATUS2, LM83_REG_R_STATUS1, LM83_REG_R_STATUS2
+};
+
+static const u8 LM83_MAX_ALARM_BIT[] = {
+ BIT(6), BIT(7), BIT(4), BIT(4)
+};
+
+static const u8 LM83_CRIT_ALARM_BIT[] = {
+ BIT(0), BIT(0), BIT(1), BIT(1)
+};
+
+static const u8 LM83_FAULT_BIT[] = {
+ 0, BIT(5), BIT(2), BIT(2)
};
/*
@@ -102,180 +102,274 @@ static const u8 LM83_REG_W_HIGH[] = {
*/
struct lm83_data {
- struct i2c_client *client;
- const struct attribute_group *groups[3];
- struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* registers values */
- s8 temp[9]; /* 0..3: input 1-4,
- 4..7: high limit 1-4,
- 8 : critical limit */
- u16 alarms; /* bitvector, combined */
+ struct regmap *regmap;
+ enum chips type;
};
-static struct lm83_data *lm83_update_device(struct device *dev)
+/* regmap code */
+
+static int lm83_regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- struct lm83_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
+ struct i2c_client *client = context;
+ int ret;
- mutex_lock(&data->update_lock);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ return ret;
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- int nr;
+ *val = ret;
+ return 0;
+}
- dev_dbg(&client->dev, "Updating lm83 data.\n");
- for (nr = 0; nr < 9; nr++) {
- data->temp[nr] =
- i2c_smbus_read_byte_data(client,
- LM83_REG_R_TEMP[nr]);
- }
- data->alarms =
- i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1)
- + (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2)
- << 8);
+/*
+ * The regmap write function maps read register addresses to write register
+ * addresses. This is necessary for regmap register caching to work.
+ * An alternative would be to clear the regmap cache whenever a register is
+ * written, but that would be much more expensive.
+ */
+static int lm83_regmap_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
- data->last_updated = jiffies;
- data->valid = true;
+ switch (reg) {
+ case LM83_REG_R_CONFIG:
+ case LM83_REG_R_LOCAL_HIGH:
+ case LM83_REG_R_REMOTE2_HIGH:
+ reg += 0x06;
+ break;
+ case LM83_REG_R_REMOTE1_HIGH:
+ case LM83_REG_R_REMOTE3_HIGH:
+ case LM83_REG_R_TCRIT:
+ reg += 0x18;
+ break;
+ default:
+ break;
}
- mutex_unlock(&data->update_lock);
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
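The offsets used in lm83_regmap_reg_write() can be sanity-checked against the register defines visible in this file, for example the critical limit: 0x42 (read) plus 0x18 gives 0x5a (write). A standalone check:

/* Standalone check of the read->write register offset mapping for TCRIT */
#include <stdio.h>

#define LM83_REG_R_TCRIT 0x42
#define LM83_REG_W_TCRIT 0x5A

int main(void)
{
	printf("%s\n", (LM83_REG_R_TCRIT + 0x18) == LM83_REG_W_TCRIT ? "ok" : "mismatch");
	return 0;
}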
- return data;
+static bool lm83_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LM83_REG_R_LOCAL_TEMP:
+ case LM83_REG_R_REMOTE1_TEMP:
+ case LM83_REG_R_REMOTE2_TEMP:
+ case LM83_REG_R_REMOTE3_TEMP:
+ case LM83_REG_R_STATUS1:
+ case LM83_REG_R_STATUS2:
+ return true;
+ default:
+ return false;
+ }
}
-/*
- * Sysfs stuff
- */
+static const struct regmap_config lm83_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = lm83_regmap_is_volatile,
+ .reg_read = lm83_regmap_reg_read,
+ .reg_write = lm83_regmap_reg_write,
+};
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+/* hwmon API */
+
+static int lm83_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm83_data *data = lm83_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
+ struct lm83_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = regmap_read(data->regmap, LM83_REG_TEMP[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_max:
+ err = regmap_read(data->regmap, LM83_REG_MAX[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_crit:
+ err = regmap_read(data->regmap, LM83_REG_R_TCRIT, &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_max_alarm:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_MAX_ALARM_BIT[channel]);
+ break;
+ case hwmon_temp_crit_alarm:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_CRIT_ALARM_BIT[channel]);
+ break;
+ case hwmon_temp_fault:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_FAULT_BIT[channel]);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int lm83_temp_write(struct device *dev, u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int nr = attr->index;
+ unsigned int regval;
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
- mutex_lock(&data->update_lock);
- data->temp[nr] = TEMP_TO_REG(val);
- i2c_smbus_write_byte_data(client, LM83_REG_W_HIGH[nr - 4],
- data->temp[nr]);
- mutex_unlock(&data->update_lock);
- return count;
+ switch (attr) {
+ case hwmon_temp_max:
+ err = regmap_write(data->regmap, LM83_REG_MAX[channel], regval);
+ if (err < 0)
+ return err;
+ break;
+ case hwmon_temp_crit:
+ err = regmap_write(data->regmap, LM83_REG_R_TCRIT, regval);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
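For the limit writes, the conversion in lm83_temp_write() clamps the requested value to the chip's signed 8-bit range and rounds to whole degrees. A standalone sketch of that arithmetic, re-implementing the kernel helpers only for illustration:

/* Standalone sketch of clamp_val() + DIV_ROUND_CLOSEST() as used above */
#include <stdio.h>

static long clamp_val(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static long div_round_closest(long x, long d)
{
	return (x + (x < 0 ? -d / 2 : d / 2)) / d;
}

int main(void)
{
	printf("%ld\n", div_round_closest(clamp_val(55500, -128000, 127000), 1000));	/* 56 */
	printf("%ld\n", div_round_closest(clamp_val(-130000, -128000, 127000), 1000));	/* -128 */
	return 0;
}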
-static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
- char *buf)
+static int lm83_chip_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct lm83_data *data = lm83_update_device(dev);
- return sprintf(buf, "%d\n", data->alarms);
+ struct lm83_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_chip_alarms:
+ err = regmap_read(data->regmap, LM83_REG_R_STATUS1, &regval);
+ if (err < 0)
+ return err;
+ *val = regval;
+ err = regmap_read(data->regmap, LM83_REG_R_STATUS2, &regval);
+ if (err < 0)
+ return err;
+ *val |= regval << 8;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
-static ssize_t alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int lm83_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm83_data *data = lm83_update_device(dev);
- int bitnr = attr->index;
+ switch (type) {
+ case hwmon_chip:
+ return lm83_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm83_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
+static int lm83_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return lm83_temp_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, 4);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, 5);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, temp, 6);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, temp, 7);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit, temp, 8);
-
-/* Individual alarm files */
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 4);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 9);
-static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 10);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 12);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 13);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 15);
-/* Raw alarm file for compatibility */
-static DEVICE_ATTR_RO(alarms);
-
-static struct attribute *lm83_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
-
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &dev_attr_alarms.attr,
- NULL
-};
+static umode_t lm83_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct lm83_data *data = _data;
-static const struct attribute_group lm83_group = {
- .attrs = lm83_attributes,
-};
+ /*
+ * LM82 only supports a single external channel, modeled as channel 2.
+ */
+ if (data->type == lm82 && (channel == 1 || channel == 3))
+ return 0;
-static struct attribute *lm83_attributes_opt[] = {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp4_crit.dev_attr.attr,
-
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ switch (type) {
+ case hwmon_chip:
+ if (attr == hwmon_chip_alarms)
+ return 0444;
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_fault:
+ if (channel)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_crit:
+ if (channel == 2)
+ return 0644;
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *lm83_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT
+ ),
NULL
};
-static const struct attribute_group lm83_group_opt = {
- .attrs = lm83_attributes_opt,
+static const struct hwmon_ops lm83_hwmon_ops = {
+ .is_visible = lm83_is_visible,
+ .read = lm83_read,
+ .write = lm83_write,
};
-/*
- * Real code
- */
+static const struct hwmon_chip_info lm83_chip_info = {
+ .ops = &lm83_hwmon_ops,
+ .info = lm83_info,
+};
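
With the channel map above, the hwmon core derives sysfs names from the channel index (channel N becomes temp<N+1>_*), and lm83_is_visible() then hides temp2_*/temp4_* on LM82 and makes only temp3_crit writable. A rough sketch, assuming the usual with_info registration done in probe(), of how the core invokes the new callbacks when temp3_input is read:

/* Illustrative only: reading temp3_input maps to channel index 2. */
static int example_read_temp3(struct device *hwmon_dev, long *millideg)
{
	return lm83_read(hwmon_dev, hwmon_temp, hwmon_temp_input, 2, millideg);
}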
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm83_detect(struct i2c_client *new_client,
+static int lm83_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
- struct i2c_adapter *adapter = new_client->adapter;
+ struct i2c_adapter *adapter = client->adapter;
const char *name;
u8 man_id, chip_id;
@@ -283,22 +377,30 @@ static int lm83_detect(struct i2c_client *new_client,
return -ENODEV;
/* Detection */
- if ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS1) & 0xA8) ||
- (i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS2) & 0x48) ||
- (i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) & 0x41)) {
+ if ((i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1) & 0xA8) ||
+ (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2) & 0x48) ||
+ (i2c_smbus_read_byte_data(client, LM83_REG_R_CONFIG) & 0x41)) {
dev_dbg(&adapter->dev, "LM83 detection failed at 0x%02x\n",
- new_client->addr);
+ client->addr);
return -ENODEV;
}
/* Identification */
- man_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_MAN_ID);
+ man_id = i2c_smbus_read_byte_data(client, LM83_REG_R_MAN_ID);
if (man_id != 0x01) /* National Semiconductor */
return -ENODEV;
- chip_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_CHIP_ID);
+ chip_id = i2c_smbus_read_byte_data(client, LM83_REG_R_CHIP_ID);
switch (chip_id) {
case 0x03:
+ /*
+ * According to the LM82 datasheet dated March 2013, recent
+ * revisions of LM82 have a die revision of 0x03. This was
+ * confirmed with a real chip. Further details in this revision
+ * of the LM82 datasheet strongly suggest that LM82 is just a
+ * repackaged LM83. It is therefore impossible to distinguish
+ * those chips from LM83, and they will be misdetected as LM83.
+ */
name = "lm83";
break;
case 0x01:
@@ -306,9 +408,9 @@ static int lm83_detect(struct i2c_client *new_client,
break;
default:
/* identification failed */
- dev_info(&adapter->dev,
- "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
- man_id, chip_id);
+ dev_dbg(&adapter->dev,
+ "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
+ man_id, chip_id);
return -ENODEV;
}
@@ -317,34 +419,31 @@ static int lm83_detect(struct i2c_client *new_client,
return 0;
}
-static const struct i2c_device_id lm83_id[];
+static const struct i2c_device_id lm83_id[] = {
+ { "lm83", lm83 },
+ { "lm82", lm82 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm83_id);
-static int lm83_probe(struct i2c_client *new_client)
+static int lm83_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm83_data *data;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm83_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm83_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = new_client;
- mutex_init(&data->update_lock);
+ data->regmap = devm_regmap_init(dev, NULL, client, &lm83_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
- /*
- * Register sysfs hooks
- * The LM82 can only monitor one external diode which is
- * at the same register as the LM83 temp3 entry - so we
- * declare 1 and 3 common, and then 2 and 4 only for the LM83.
- */
- data->groups[0] = &lm83_group;
- if (i2c_match_id(lm83_id, new_client)->driver_data == lm83)
- data->groups[1] = &lm83_group_opt;
+ data->type = i2c_match_id(lm83_id, client)->driver_data;
- hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
- new_client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &lm83_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -352,13 +451,6 @@ static int lm83_probe(struct i2c_client *new_client)
* Driver data (common to all clients)
*/
-static const struct i2c_device_id lm83_id[] = {
- { "lm83", lm83 },
- { "lm82", lm82 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, lm83_id);
-
static struct i2c_driver lm83_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 74019dff2550..1c9493c70813 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
- .max_convrate = 8,
+ .max_convrate = 7,
},
[lm86] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
- .flags = LM90_HAVE_CRIT,
+ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6654] = {
+ .flags = LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
},
[max6680] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
- | LM90_HAVE_CRIT_ALRM_SWP,
+ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
@@ -848,7 +849,7 @@ static int lm90_update_device(struct device *dev)
* Re-enable ALERT# output if it was originally enabled and
* relevant alarms are all clear
*/
- if (!(data->config_orig & 0x80) &&
+ if ((client->irq || !(data->config_orig & 0x80)) &&
!(data->alarms & data->alert_alarms)) {
if (data->config & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
@@ -1807,22 +1808,22 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
if (st & LM90_STATUS_LLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 0);
+ hwmon_temp_min_alarm, 0);
if (st & LM90_STATUS_RLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 1);
+ hwmon_temp_min_alarm, 1);
if (st2 & MAX6696_STATUS2_R2LOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 2);
+ hwmon_temp_min_alarm, 2);
if (st & LM90_STATUS_LHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 0);
+ hwmon_temp_max_alarm, 0);
if (st & LM90_STATUS_RHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 1);
+ hwmon_temp_max_alarm, 1);
if (st2 & MAX6696_STATUS2_R2HIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 2);
+ hwmon_temp_max_alarm, 2);
return true;
}
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 5fcfd57df61e..4c5487aeb3cf 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -254,7 +254,7 @@ err_remove:
return err;
}
-static int max1111_remove(struct spi_device *spi)
+static void max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
@@ -265,7 +265,6 @@ static int max1111_remove(struct spi_device *spi)
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
mutex_destroy(&data->drvdata_lock);
- return 0;
}
static const struct spi_device_id max1111_ids[] = {
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 4cf4fe6809a3..93e048ee4955 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -100,7 +100,7 @@ static int max31722_probe(struct spi_device *spi)
return 0;
}
-static int max31722_remove(struct spi_device *spi)
+static void max31722_remove(struct spi_device *spi)
{
struct max31722_data *data = spi_get_drvdata(spi);
int ret;
@@ -111,8 +111,6 @@ static int max31722_remove(struct spi_device *spi)
if (ret)
/* There is nothing we can do about this ... */
dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
-
- return 0;
}
static int __maybe_unused max31722_suspend(struct device *dev)
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index ccc0f047bd44..14bb7726f8d7 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -87,6 +87,9 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range; /* Index in above rpm_ranges table */
+
+ /* Optional regulator for FAN supply */
+ struct regulator *reg;
};
static struct max6639_data *max6639_update_device(struct device *dev)
@@ -516,6 +519,11 @@ static int max6639_detect(struct i2c_client *client,
return 0;
}
+static void max6639_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
static int max6639_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -528,6 +536,28 @@ static int max6639_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
+
+ data->reg = devm_regulator_get_optional(dev, "fan");
+ if (IS_ERR(data->reg)) {
+ if (PTR_ERR(data->reg) != -ENODEV)
+ return PTR_ERR(data->reg);
+
+ data->reg = NULL;
+ } else {
+ /* Spin up fans */
+ err = regulator_enable(data->reg);
+ if (err) {
+ dev_err(dev, "Failed to enable fan supply: %d\n", err);
+ return err;
+ }
+ err = devm_add_action_or_reset(dev, max6639_regulator_disable,
+ data->reg);
+ if (err) {
+ dev_err(dev, "Failed to register action: %d\n", err);
+ return err;
+ }
+ }
+
mutex_init(&data->update_lock);
/* Initialize the max6639 chip */
@@ -545,23 +575,39 @@ static int max6639_probe(struct i2c_client *client)
static int max6639_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
- if (data < 0)
- return data;
+ struct max6639_data *data = dev_get_drvdata(dev);
+ int ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+
+ if (ret < 0)
+ return ret;
+
+ if (data->reg)
+ regulator_disable(data->reg);
return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+ MAX6639_REG_GCONFIG, ret | MAX6639_GCONFIG_STANDBY);
}
static int max6639_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
- if (data < 0)
- return data;
+ struct max6639_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (data->reg) {
+ ret = regulator_enable(data->reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (ret < 0)
+ return ret;
return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+ MAX6639_REG_GCONFIG, ret & ~MAX6639_GCONFIG_STANDBY);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 4a8becdb0d58..b48bd7c961d6 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -18,15 +18,6 @@
#define MLXREG_FAN_MAX_STATE 10
#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
-/*
- * Minimum and maximum FAN allowed speed in percent: from 20% to 100%. Values
- * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting FAN speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
-#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
#define MLXREG_FAN_TACHO_DIV_MIN 283
@@ -87,13 +78,16 @@ struct mlxreg_fan_tacho {
* @connected: indicates if PWM is connected;
* @reg: register offset;
* @cooling: cooling device levels;
+ * @last_hwmon_state: last cooling state set by hwmon subsystem;
+ * @last_thermal_state: last cooling state set by thermal subsystem;
* @cdev: cooling device;
*/
struct mlxreg_fan_pwm {
struct mlxreg_fan *fan;
bool connected;
u32 reg;
- u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ unsigned long last_hwmon_state;
+ unsigned long last_thermal_state;
struct thermal_cooling_device *cdev;
};
@@ -119,6 +113,9 @@ struct mlxreg_fan {
int divider;
};
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state);
+
static int
mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
@@ -213,6 +210,18 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
val > MLXREG_FAN_MAX_DUTY)
return -EINVAL;
pwm = &fan->pwm[channel];
+ /* If thermal is configured, handle the PWM limit setting. */
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(val);
+ /*
+ * Update PWM only in case requested state is not less than the
+ * last thermal state.
+ */
+ if (pwm->last_hwmon_state >= pwm->last_thermal_state)
+ return mlxreg_fan_set_cur_state(pwm->cdev,
+ pwm->last_hwmon_state);
+ return 0;
+ }
return regmap_write(fan->regmap, pwm->reg, val);
default:
return -EOPNOTSUPP;
@@ -338,58 +347,22 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
{
struct mlxreg_fan_pwm *pwm = cdev->devdata;
struct mlxreg_fan *fan = pwm->fan;
- unsigned long cur_state;
- int i, config = 0;
- u32 regval;
int err;
- /*
- * Verify if this request is for changing allowed FAN dynamical
- * minimum. If it is - update cooling levels accordingly and update
- * state, if current state is below the newly requested minimum state.
- * For example, if current state is 5, and minimal state is to be
- * changed from 4 to 6, fan->cooling_levels[0 to 5] will be changed all
- * from 4 to 6. And state 5 (fan->cooling_levels[4]) should be
- * overwritten.
- */
- if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
- /*
- * This is configuration change, which is only supported through sysfs.
- * For configuration non-zero value is to be returned to avoid thermal
- * statistics update.
- */
- config = 1;
- state -= MLXREG_FAN_MAX_STATE;
- for (i = 0; i < state; i++)
- pwm->cooling_levels[i] = state;
- for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
- pwm->cooling_levels[i] = i;
-
- err = regmap_read(fan->regmap, pwm->reg, &regval);
- if (err) {
- dev_err(fan->dev, "Failed to query PWM duty\n");
- return err;
- }
-
- cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
- if (state < cur_state)
- return config;
-
- state = cur_state;
- }
-
if (state > MLXREG_FAN_MAX_STATE)
return -EINVAL;
- /* Normalize the state to the valid speed range. */
- state = pwm->cooling_levels[state];
+ /* Save thermal state. */
+ pwm->last_thermal_state = state;
+
+ state = max_t(unsigned long, state, pwm->last_hwmon_state);
err = regmap_write(fan->regmap, pwm->reg,
MLXREG_FAN_PWM_STATE2DUTY(state));
if (err) {
dev_err(fan->dev, "Failed to write PWM duty\n");
return err;
}
- return config;
+ return 0;
}
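
The replacement policy is simply that the higher of the last hwmon request and the last thermal request wins. A small illustrative helper, assuming the MLXREG_FAN_PWM_DUTY2STATE()/MLXREG_FAN_PWM_STATE2DUTY() macros map roughly linearly between duty (0..255) and state (0..10):

/* Illustrative only: the state that actually reaches the PWM register. */
static unsigned long mlxreg_fan_effective_state(struct mlxreg_fan_pwm *pwm,
						unsigned long thermal_state)
{
	/* e.g. last_hwmon_state == 4 (~40%), thermal_state == 3 -> 4 */
	return max_t(unsigned long, thermal_state, pwm->last_hwmon_state);
}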
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
@@ -564,7 +537,7 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
{
- int i, j;
+ int i;
for (i = 0; i < MLXREG_FAN_MAX_PWM; i++) {
struct mlxreg_fan_pwm *pwm = &fan->pwm[i];
@@ -579,11 +552,8 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
return PTR_ERR(pwm->cdev);
}
- /* Init cooling levels per PWM state. */
- for (j = 0; j < MLXREG_FAN_SPEED_MIN_LEVEL; j++)
- pwm->cooling_levels[j] = MLXREG_FAN_SPEED_MIN_LEVEL;
- for (j = MLXREG_FAN_SPEED_MIN_LEVEL; j <= MLXREG_FAN_MAX_STATE; j++)
- pwm->cooling_levels[j] = j;
+ /* Set minimal PWM speed. */
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
}
return 0;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index fd3f91cb01c6..2b91f7e05126 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -308,6 +308,7 @@ static void superio_exit(struct nct6775_sio_data *sio_data)
#define NUM_TEMP 10 /* Max number of temp attribute sets w/ limits*/
#define NUM_TEMP_FIXED 6 /* Max number of fixed temp attribute sets */
+#define NUM_TSI_TEMP 8 /* Max number of TSI temp register pairs */
#define NUM_REG_ALARM 7 /* Max number of alarm registers */
#define NUM_REG_BEEP 5 /* Max number of beep registers */
@@ -498,6 +499,8 @@ static const u16 NCT6775_REG_TEMP_CRIT[32] = {
[11] = 0xa07
};
+static const u16 NCT6775_REG_TSI_TEMP[] = { 0x669 };
+
/* NCT6776 specific data */
/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
@@ -581,6 +584,9 @@ static const u16 NCT6776_REG_TEMP_CRIT[32] = {
[12] = 0x70a,
};
+static const u16 NCT6776_REG_TSI_TEMP[] = {
+ 0x409, 0x40b, 0x40d, 0x40f, 0x411, 0x413, 0x415, 0x417 };
+
/* NCT6779 specific data */
static const u16 NCT6779_REG_IN[] = {
@@ -864,6 +870,8 @@ static const char *const nct6796_temp_label[] = {
#define NCT6796_TEMP_MASK 0xbfff0ffe
#define NCT6796_VIRT_TEMP_MASK 0x80000c00
+static const u16 NCT6796_REG_TSI_TEMP[] = { 0x409, 0x40b };
+
static const char *const nct6798_temp_label[] = {
"",
"SYSTIN",
@@ -1005,6 +1013,8 @@ static const u16 NCT6106_REG_TEMP_CRIT[32] = {
[12] = 0x205,
};
+static const u16 NCT6106_REG_TSI_TEMP[] = { 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x63, 0x65, 0x67 };
+
/* NCT6112D/NCT6114D/NCT6116D specific data */
static const u16 NCT6116_REG_FAN[] = { 0x20, 0x22, 0x24, 0x26, 0x28 };
@@ -1069,6 +1079,8 @@ static const s8 NCT6116_BEEP_BITS[] = {
34, -1 /* intrusion0, intrusion1 */
};
+static const u16 NCT6116_REG_TSI_TEMP[] = { 0x59, 0x5b };
+
static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
{
if (mode == 0 && pwm == 255)
@@ -1169,17 +1181,23 @@ static inline u8 in_to_reg(u32 val, u8 nr)
return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
}
+/* TSI temperatures are in 8.3 format */
+static inline unsigned int tsi_temp_from_reg(unsigned int reg)
+{
+ return (reg >> 5) * 125;
+}
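
A worked example of the 8.3 fixed-point conversion above, with illustrative values:

/*
 * A register value of 0x0c80 gives 0x0c80 >> 5 = 100 eighths of a degree,
 * and 100 * 125 = 12500, i.e. 12.5 degrees C in the millidegree units
 * that hwmon reports.
 */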
+
/*
* Data structures and manipulation thereof
*/
struct nct6775_data {
int addr; /* IO base of hw monitor block */
- int sioreg; /* SIO register address */
+ struct nct6775_sio_data *sio_data;
enum kinds kind;
const char *name;
- const struct attribute_group *groups[6];
+ const struct attribute_group *groups[7];
u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit
@@ -1240,6 +1258,8 @@ struct nct6775_data {
const u16 *REG_ALARM;
const u16 *REG_BEEP;
+ const u16 *REG_TSI_TEMP;
+
unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
@@ -1267,6 +1287,7 @@ struct nct6775_data {
s8 temp_offset[NUM_TEMP_FIXED];
s16 temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit */
+ s16 tsi_temp[NUM_TSI_TEMP];
u64 alarms;
u64 beeps;
@@ -1315,6 +1336,7 @@ struct nct6775_data {
u16 have_temp;
u16 have_temp_fixed;
+ u16 have_tsi_temp;
u16 have_in;
/* Remember extra register values over suspend/resume */
@@ -1464,13 +1486,15 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
switch (data->kind) {
case nct6106:
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+ (reg >= 0x59 && reg < 0x69 && (reg & 1)) ||
reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
reg == 0x111 || reg == 0x121 || reg == 0x131;
case nct6116:
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
- reg == 0x26 || reg == 0x28 || reg == 0xe0 || reg == 0xe2 ||
- reg == 0xe4 || reg == 0xe6 || reg == 0xe8 || reg == 0x111 ||
- reg == 0x121 || reg == 0x131 || reg == 0x191 || reg == 0x1a1;
+ reg == 0x26 || reg == 0x28 || reg == 0x59 || reg == 0x5b ||
+ reg == 0xe0 || reg == 0xe2 || reg == 0xe4 || reg == 0xe6 ||
+ reg == 0xe8 || reg == 0x111 || reg == 0x121 || reg == 0x131 ||
+ reg == 0x191 || reg == 0x1a1;
case nct6775:
return (((reg & 0xff00) == 0x100 ||
(reg & 0xff00) == 0x200) &&
@@ -1479,7 +1503,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
(reg & 0x00ff) == 0x55)) ||
(reg & 0xfff0) == 0x630 ||
reg == 0x640 || reg == 0x642 ||
- reg == 0x662 ||
+ reg == 0x662 || reg == 0x669 ||
((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
reg == 0x73 || reg == 0x75 || reg == 0x77;
case nct6776:
@@ -1490,6 +1514,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
(reg & 0x00ff) == 0x55)) ||
(reg & 0xfff0) == 0x630 ||
reg == 0x402 ||
+ (reg >= 0x409 && reg < 0x419 && (reg & 1)) ||
reg == 0x640 || reg == 0x642 ||
((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
reg == 0x73 || reg == 0x75 || reg == 0x77;
@@ -1504,6 +1529,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
(reg & 0xfff0) == 0x4c0 ||
reg == 0x402 ||
+ (reg >= 0x409 && reg < 0x419 && (reg & 1)) ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 || reg == 0x64a ||
reg == 0x64c ||
@@ -1987,6 +2013,12 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
data->REG_TEMP_OFFSET[i]);
}
+ for (i = 0; i < NUM_TSI_TEMP; i++) {
+ if (!(data->have_tsi_temp & BIT(i)))
+ continue;
+ data->tsi_temp[i] = data->read_value(data, data->REG_TSI_TEMP[i]);
+ }
+
data->alarms = 0;
for (i = 0; i < NUM_REG_ALARM; i++) {
u8 alarm;
@@ -2670,6 +2702,44 @@ static const struct sensor_template_group nct6775_temp_template_group = {
.base = 1,
};
+static ssize_t show_tsi_temp(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nct6775_data *data = nct6775_update_device(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+
+ return sysfs_emit(buf, "%u\n", tsi_temp_from_reg(data->tsi_temp[sattr->index]));
+}
+
+static ssize_t show_tsi_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+
+ return sysfs_emit(buf, "TSI%d_TEMP\n", sattr->index);
+}
+
+SENSOR_TEMPLATE(tsi_temp_input, "temp%d_input", 0444, show_tsi_temp, NULL, 0);
+SENSOR_TEMPLATE(tsi_temp_label, "temp%d_label", 0444, show_tsi_temp_label, NULL, 0);
+
+static umode_t nct6775_tsi_temp_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int temp = index / 2;
+
+ return (data->have_tsi_temp & BIT(temp)) ? attr->mode : 0;
+}
+
+/*
+ * The index calculation in nct6775_tsi_temp_is_visible() must be kept in
+ * sync with the size of this array.
+ */
+static struct sensor_device_template *nct6775_tsi_temp_template[] = {
+ &sensor_dev_template_tsi_temp_input,
+ &sensor_dev_template_tsi_temp_label,
+ NULL
+};
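
Because the group is built from two templates per TSI channel, the index seen by nct6775_tsi_temp_is_visible() advances in pairs, which is why index / 2 selects the channel. Illustrative mapping, assuming the usual template-group attribute ordering:

/*
 * index 0, 1 -> TSI0 (temp%d_input, temp%d_label)
 * index 2, 3 -> TSI1, and so on.
 */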
+
static ssize_t
show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -3559,7 +3629,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
+ struct nct6775_sio_data *sio_data = data->sio_data;
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3948,10 +4018,11 @@ static int nct6775_probe(struct platform_device *pdev)
const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
- int num_reg_temp, num_reg_temp_mon;
+ int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
u8 cr2a;
struct attribute_group *group;
struct device *hwmon_dev;
+ struct sensor_template_group tsi_temp_tg;
int num_attr_groups = 0;
if (sio_data->access == access_direct) {
@@ -3967,7 +4038,7 @@ static int nct6775_probe(struct platform_device *pdev)
return -ENOMEM;
data->kind = sio_data->kind;
- data->sioreg = sio_data->sioreg;
+ data->sio_data = sio_data;
if (sio_data->access == access_direct) {
data->addr = res->start;
@@ -4043,11 +4114,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6106_ALARM_BITS;
data->REG_BEEP = NCT6106_REG_BEEP;
data->BEEP_BITS = NCT6106_BEEP_BITS;
+ data->REG_TSI_TEMP = NCT6106_REG_TSI_TEMP;
reg_temp = NCT6106_REG_TEMP;
reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6106_REG_TSI_TEMP);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -4116,11 +4189,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6116_ALARM_BITS;
data->REG_BEEP = NCT6106_REG_BEEP;
data->BEEP_BITS = NCT6116_BEEP_BITS;
+ data->REG_TSI_TEMP = NCT6116_REG_TSI_TEMP;
reg_temp = NCT6106_REG_TEMP;
reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6116_REG_TSI_TEMP);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -4191,11 +4266,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
data->REG_BEEP = NCT6775_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6775_REG_TSI_TEMP;
reg_temp = NCT6775_REG_TEMP;
reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6775_REG_TSI_TEMP);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
@@ -4264,11 +4341,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
reg_temp = NCT6775_REG_TEMP;
reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
@@ -4341,11 +4420,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6779_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
reg_temp = NCT6779_REG_TEMP;
reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -4460,6 +4541,24 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
else
data->REG_BEEP = NCT6792_REG_BEEP;
+ switch (data->kind) {
+ case nct6791:
+ case nct6792:
+ case nct6793:
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
+ break;
+ case nct6795:
+ case nct6796:
+ case nct6797:
+ case nct6798:
+ data->REG_TSI_TEMP = NCT6796_REG_TSI_TEMP;
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6796_REG_TSI_TEMP);
+ break;
+ default:
+ num_reg_tsi_temp = 0;
+ break;
+ }
reg_temp = NCT6779_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
@@ -4659,6 +4758,12 @@ static int nct6775_probe(struct platform_device *pdev)
}
#endif /* USE_ALTERNATE */
+ /* Check which TSIx_TEMP registers are active */
+ for (i = 0; i < num_reg_tsi_temp; i++) {
+ if (data->read_value(data, data->REG_TSI_TEMP[i]))
+ data->have_tsi_temp |= BIT(i);
+ }
+
/* Initialize the chip */
nct6775_init_device(data);
@@ -4766,6 +4871,18 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR(group);
data->groups[num_attr_groups++] = group;
+
+ if (data->have_tsi_temp) {
+ tsi_temp_tg.templates = nct6775_tsi_temp_template;
+ tsi_temp_tg.is_visible = nct6775_tsi_temp_is_visible;
+ tsi_temp_tg.base = fls(data->have_temp) + 1;
+ group = nct6775_create_attr_group(dev, &tsi_temp_tg, fls(data->have_tsi_temp));
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[num_attr_groups++] = group;
+ }
+
data->groups[num_attr_groups++] = &nct6775_group_other;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
@@ -4985,9 +5102,14 @@ static struct platform_device *pdev[2];
static const char * const asus_wmi_boards[] = {
"ProArt X570-CREATOR WIFI",
+ "Pro B550M-C",
"Pro WS X570-ACE",
"PRIME B360-PLUS",
"PRIME B460-PLUS",
+ "PRIME B550-PLUS",
+ "PRIME B550M-A",
+ "PRIME B550M-A (WI-FI)",
+ "PRIME X570-P",
"PRIME X570-PRO",
"ROG CROSSHAIR VIII DARK HERO",
"ROG CROSSHAIR VIII FORMULA",
@@ -4997,10 +5119,22 @@ static const char * const asus_wmi_boards[] = {
"ROG STRIX B550-E GAMING",
"ROG STRIX B550-F GAMING",
"ROG STRIX B550-F GAMING (WI-FI)",
+ "ROG STRIX B550-F GAMING WIFI II",
"ROG STRIX B550-I GAMING",
+ "ROG STRIX B550-XE GAMING (WI-FI)",
+ "ROG STRIX X570-E GAMING",
"ROG STRIX X570-F GAMING",
"ROG STRIX X570-I GAMING",
"ROG STRIX Z390-E GAMING",
+ "ROG STRIX Z390-F GAMING",
+ "ROG STRIX Z390-H GAMING",
+ "ROG STRIX Z390-I GAMING",
+ "ROG STRIX Z490-A GAMING",
+ "ROG STRIX Z490-E GAMING",
+ "ROG STRIX Z490-F GAMING",
+ "ROG STRIX Z490-G GAMING",
+ "ROG STRIX Z490-G GAMING (WI-FI)",
+ "ROG STRIX Z490-H GAMING",
"ROG STRIX Z490-I GAMING",
"TUF GAMING B550M-PLUS",
"TUF GAMING B550M-PLUS (WI-FI)",
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 414204f5704c..9c9e9f4ccb9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 },
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
- [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 },
+ [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 0cb4a0a6cbc1..f00cd59f1d19 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -674,6 +674,9 @@ static ssize_t occ_show_caps_3(struct device *dev,
case 7:
val = caps->user_source;
break;
+ case 8:
+ val = get_unaligned_be16(&caps->soft_min) * 1000000ULL;
+ break;
default:
return -EINVAL;
}
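
The scaling follows from the units involved: soft_min is a big-endian 16-bit value in watts, while hwmon power attributes are reported in microwatts. A worked example with an assumed reading:

/* e.g. soft_min == 250 (W) -> 250 * 1000000 = 250000000 uW */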
@@ -835,12 +838,13 @@ static int occ_setup_sensor_attrs(struct occ *occ)
case 1:
num_attrs += (sensors->caps.num_sensors * 7);
break;
- case 3:
- show_caps = occ_show_caps_3;
- fallthrough;
case 2:
num_attrs += (sensors->caps.num_sensors * 8);
break;
+ case 3:
+ show_caps = occ_show_caps_3;
+ num_attrs += (sensors->caps.num_sensors * 9);
+ break;
default:
sensors->caps.num_sensors = 0;
}
@@ -1047,6 +1051,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
show_caps, NULL, 7, 0);
attr++;
+
+ if (sensors->caps.version > 2) {
+ snprintf(attr->name, sizeof(attr->name),
+ "power%d_cap_min_soft", s);
+ attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+ show_caps, NULL,
+ 8, 0);
+ attr++;
+ }
}
}
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 5020117be740..2dd4a4d240c0 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -119,6 +119,8 @@ struct occ {
u8 prev_stat;
u8 prev_ext_stat;
u8 prev_occs_present;
+ u8 prev_ips_status;
+ u8 prev_mode;
};
int occ_setup(struct occ *occ, const char *name);
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index 03b16abef67f..b2f788a77746 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -19,6 +19,8 @@
#define OCC_EXT_STAT_DVFS_POWER BIT(6)
#define OCC_EXT_STAT_MEM_THROTTLE BIT(5)
#define OCC_EXT_STAT_QUICK_DROP BIT(4)
+#define OCC_EXT_STAT_DVFS_VDD BIT(3)
+#define OCC_EXT_STAT_GPU_THROTTLE GENMASK(2, 0)
static ssize_t occ_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -63,6 +65,18 @@ static ssize_t occ_sysfs_show(struct device *dev,
else
val = 1;
break;
+ case 8:
+ val = header->ips_status;
+ break;
+ case 9:
+ val = header->mode;
+ break;
+ case 10:
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_VDD);
+ break;
+ case 11:
+ val = header->ext_status & OCC_EXT_STAT_GPU_THROTTLE;
+ break;
default:
return -EINVAL;
}
@@ -88,6 +102,10 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4);
static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5);
static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6);
static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7);
+static SENSOR_DEVICE_ATTR(occ_ips_status, 0444, occ_sysfs_show, NULL, 8);
+static SENSOR_DEVICE_ATTR(occ_mode, 0444, occ_sysfs_show, NULL, 9);
+static SENSOR_DEVICE_ATTR(occ_dvfs_vdd, 0444, occ_sysfs_show, NULL, 10);
+static SENSOR_DEVICE_ATTR(occ_gpu_throttle, 0444, occ_sysfs_show, NULL, 11);
static DEVICE_ATTR_RO(occ_error);
static struct attribute *occ_attributes[] = {
@@ -99,6 +117,10 @@ static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr,
&sensor_dev_attr_occ_state.dev_attr.attr,
&sensor_dev_attr_occs_present.dev_attr.attr,
+ &sensor_dev_attr_occ_ips_status.dev_attr.attr,
+ &sensor_dev_attr_occ_mode.dev_attr.attr,
+ &sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr,
+ &sensor_dev_attr_occ_gpu_throttle.dev_attr.attr,
&dev_attr_occ_error.attr,
NULL
};
@@ -156,12 +178,34 @@ void occ_sysfs_poll_done(struct occ *occ)
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
+ if ((header->ext_status & OCC_EXT_STAT_DVFS_VDD) !=
+ (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_VDD)) {
+ name = sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
+ if ((header->ext_status & OCC_EXT_STAT_GPU_THROTTLE) !=
+ (occ->prev_ext_stat & OCC_EXT_STAT_GPU_THROTTLE)) {
+ name = sensor_dev_attr_occ_gpu_throttle.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
if ((header->status & OCC_STAT_MASTER) &&
header->occs_present != occ->prev_occs_present) {
name = sensor_dev_attr_occs_present.dev_attr.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
+ if (header->ips_status != occ->prev_ips_status) {
+ name = sensor_dev_attr_occ_ips_status.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
+ if (header->mode != occ->prev_mode) {
+ name = sensor_dev_attr_occ_mode.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
if (occ->error && occ->error != occ->prev_error) {
name = dev_attr_occ_error.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
@@ -174,6 +218,8 @@ done:
occ->prev_stat = header->status;
occ->prev_ext_stat = header->ext_status;
occ->prev_occs_present = header->occs_present;
+ occ->prev_ips_status = header->ips_status;
+ occ->prev_mode = header->mode;
}
int occ_setup_sysfs(struct occ *occ)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 41f6cbf96d3b..a2ea1d5a8765 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -174,6 +174,13 @@ config SENSORS_LM25066
This driver can also be built as a module. If so, the module will
be called lm25066.
+config SENSORS_LM25066_REGULATOR
+ bool "Regulator support for LM25066 and compatibles"
+ depends on SENSORS_LM25066 && REGULATOR
+ help
+ If you say yes here you get regulator support for National
+ Semiconductor LM25066, LM5064, and LM5066.
+
config SENSORS_LTC2978
tristate "Linear Technologies LTC2978 and compatibles"
help
@@ -189,8 +196,8 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear Technology
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
- LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
+ LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
+ LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
and LTM4700.
config SENSORS_LTC3815
@@ -310,6 +317,22 @@ config SENSORS_PIM4328
This driver can also be built as a module. If so, the module will
be called pim4328.
+config SENSORS_PLI1209BC
+ tristate "Vicor PLI1209BC"
+ help
+ If you say yes here you get hardware monitoring support for Vicor
+ PLI1209BC Digital Supervisor.
+
+ This driver can also be built as a module. If so, the module will
+ be called pli1209bc.
+
+config SENSORS_PLI1209BC_REGULATOR
+ bool "Regulator support for PLI1209BC"
+ depends on SENSORS_PLI1209BC && REGULATOR
+ help
+ If you say yes here you get regulator support for Vicor PLI1209BC
+ Digital Supervisor.
+
config SENSORS_PM6764TR
tristate "ST PM6764TR"
help
@@ -394,6 +417,12 @@ config SENSORS_XDPE122
This driver can also be built as a module. If so, the module will
be called xdpe12284.
+config SENSORS_XDPE122_REGULATOR
+ bool "Regulator support for XDPE122 and compatibles"
+ depends on SENSORS_XDPE122 && REGULATOR
+ help
+ If you say yes here you get regulator support for the xdpe12284 and
+ compatible devices.
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e5935f70c9e0..a4a96ac71de7 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
+obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index d311e0557401..3b07bfb43e93 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -475,6 +475,7 @@ static int adm1275_probe(struct i2c_client *client)
int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
int tindex = -1;
u32 shunt;
+ u32 avg;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -687,7 +688,7 @@ static int adm1275_probe(struct i2c_client *client)
if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
(ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
- ret = i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_word_data(client,
ADM1275_PMON_CONFIG,
config);
if (ret < 0) {
@@ -756,6 +757,43 @@ static int adm1275_probe(struct i2c_client *client)
return -ENODEV;
}
+ if (data->have_power_sampling &&
+ of_property_read_u32(client->dev.of_node,
+ "adi,power-sample-average", &avg) == 0) {
+ if (!avg || avg > ADM1275_SAMPLES_AVG_MAX ||
+ BIT(__fls(avg)) != avg) {
+ dev_err(&client->dev,
+ "Invalid number of power samples");
+ return -EINVAL;
+ }
+ ret = adm1275_write_pmon_config(data, client, true,
+ ilog2(avg));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Setting power sample averaging failed with error %d",
+ ret);
+ return ret;
+ }
+ }
+
+ if (of_property_read_u32(client->dev.of_node,
+ "adi,volt-curr-sample-average", &avg) == 0) {
+ if (!avg || avg > ADM1275_SAMPLES_AVG_MAX ||
+ BIT(__fls(avg)) != avg) {
+ dev_err(&client->dev,
+ "Invalid number of voltage/current samples");
+ return -EINVAL;
+ }
+ ret = adm1275_write_pmon_config(data, client, false,
+ ilog2(avg));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Setting voltage and current sample averaging failed with error %d",
+ ret);
+ return ret;
+ }
+ }
+
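A worked example of the validation and encoding above: both properties accept only power-of-two sample counts within range, and the register field stores the exponent (values assumed to be within ADM1275_SAMPLES_AVG_MAX):

/*
 * avg = 128: BIT(__fls(128)) == 128, valid; ilog2(128) == 7 is written.
 * avg = 100: BIT(__fls(100)) == 64 != 100, rejected with -EINVAL.
 */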
if (voindex < 0)
voindex = vindex;
if (vindex >= 0) {
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 0ea7e1c18bdc..09276e397194 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -62,7 +62,7 @@ static const struct i2c_device_id ir38064_id[] = {
MODULE_DEVICE_TABLE(i2c, ir38064_id);
-static const struct of_device_id ir38064_of_match[] = {
+static const struct of_device_id __maybe_unused ir38064_of_match[] = {
{ .compatible = "infineon,ir38060" },
{ .compatible = "infineon,ir38064" },
{ .compatible = "infineon,ir38164" },
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 8402b41520eb..09792cd03d9f 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -435,6 +435,12 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
return ret;
}
+#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
+static const struct regulator_desc lm25066_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+};
+#endif
+
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
{"lm25066", lm25066},
@@ -545,6 +551,14 @@ static int lm25066_probe(struct i2c_client *client)
info->m[PSC_CURRENT_IN] = info->m[PSC_CURRENT_IN] * shunt / 1000;
info->m[PSC_POWER] = info->m[PSC_POWER] * shunt / 1000;
+#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
+ /* LM25056 doesn't support OPERATION */
+ if (data->id != lm25056) {
+ info->num_regulators = ARRAY_SIZE(lm25066_reg_desc);
+ info->reg_desc = lm25066_reg_desc;
+ }
+#endif
+
return pmbus_do_probe(client, info);
}
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
new file mode 100644
index 000000000000..05b4ee35ba27
--- /dev/null
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Vicor PLI1209BC Digital Supervisor
+ *
+ * Copyright (c) 2022 9elements GmbH
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/regulator/driver.h>
+#include "pmbus.h"
+
+/*
+ * The capability command is only supported at page 0. Probing the device while
+ * the page register is set to 1 will falsely enable PEC support. Disable
+ * capability probing accordingly, since the PLI1209BC does not have any
+ * additional capabilities.
+ */
+static struct pmbus_platform_data pli1209bc_plat_data = {
+ .flags = PMBUS_NO_CAPABILITY,
+};
+
+static int pli1209bc_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int data;
+
+ switch (reg) {
+ /* PMBUS_READ_POUT uses a direct format with R=0 */
+ case PMBUS_READ_POUT:
+ data = pmbus_read_word_data(client, page, phase, reg);
+ if (data < 0)
+ return data;
+ data = sign_extend32(data, 15) * 10;
+ return clamp_val(data, -32768, 32767) & 0xffff;
+ /*
+ * PMBUS_READ_VOUT and PMBUS_READ_TEMPERATURE_1 return invalid data
+ * when the BCM is turned off. Since it is not possible to return
+ * ENODATA error, return zero instead.
+ */
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ data = pmbus_read_word_data(client, page, phase,
+ PMBUS_STATUS_WORD);
+ if (data < 0)
+ return data;
+ if (data & PB_STATUS_POWER_GOOD_N)
+ return 0;
+ return pmbus_read_word_data(client, page, phase, reg);
+ default:
+ return -ENODATA;
+ }
+}
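A worked example of the POUT rescaling, with illustrative numbers: for the direct-format coefficients declared below (m = 1, b = 0, R = 1) the PMBus core computes REAL = (READ * 10^-R - b) / m = READ / 10, but the chip reports POUT with R = 0, so the raw value is multiplied by 10 here.

/* e.g. raw POUT 1234 (1234 W at R = 0) -> 12340 -> core yields 1234 W */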
+
+#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
+static const struct regulator_desc pli1209bc_reg_desc = {
+ .name = "vout2",
+ .id = 1,
+ .of_match = of_match_ptr("vout2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .ops = &pmbus_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static struct pmbus_driver_info pli1209bc_info = {
+ .pages = 2,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 1,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ /*
+ * Page 0 sums up all attributes except voltage readings.
+ * The pli1209 digital supervisor only contains a single BCM, making
+ * page 0 redundant.
+ */
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT,
+ .read_word_data = pli1209bc_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = &pli1209bc_reg_desc,
+#endif
+};
+
+static int pli1209bc_probe(struct i2c_client *client)
+{
+ client->dev.platform_data = &pli1209bc_plat_data;
+ return pmbus_do_probe(client, &pli1209bc_info);
+}
+
+static const struct i2c_device_id pli1209bc_id[] = {
+ {"pli1209bc", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pli1209bc_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pli1209bc_of_match[] = {
+ { .compatible = "vicor,pli1209bc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pli1209bc_of_match);
+#endif
+
+static struct i2c_driver pli1209bc_driver = {
+ .driver = {
+ .name = "pli1209bc",
+ .of_match_table = of_match_ptr(pli1209bc_of_match),
+ },
+ .probe_new = pli1209bc_probe,
+ .id_table = pli1209bc_id,
+};
+
+module_i2c_driver(pli1209bc_driver);
+
+MODULE_AUTHOR("Marcello Sylvester Bauer <sylv@sylv.io>");
+MODULE_DESCRIPTION("PMBus driver for Vicor PLI1209BC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index e0aa8aa46d8c..e74b6ef070f3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm };
/*
* STATUS_VOUT, STATUS_INPUT
*/
+#define PB_VOLTAGE_VIN_OFF BIT(3)
#define PB_VOLTAGE_UV_FAULT BIT(4)
#define PB_VOLTAGE_UV_WARNING BIT(5)
#define PB_VOLTAGE_OV_WARNING BIT(6)
@@ -464,6 +465,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
#define PMBUS_REGULATOR(_name, _id) \
[_id] = { \
.name = (_name # _id), \
+ .supply_name = "vin", \
.id = (_id), \
.of_match = of_match_ptr(_name # _id), \
.regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 776ee2237be2..b2618b1d529e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
pmbus_update_sensor_data(client, s2);
regval = status & mask;
+ if (regval) {
+ ret = pmbus_write_byte_data(client, page, reg, regval);
+ if (ret)
+ goto unlock;
+ }
if (s1 && s2) {
s64 v1, v2;
@@ -1368,7 +1373,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
.reg = PMBUS_VIN_UV_FAULT_LIMIT,
.attr = "lcrit",
.alarm = "lcrit_alarm",
- .sbit = PB_VOLTAGE_UV_FAULT,
+ .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF,
}, {
.reg = PMBUS_VIN_OV_WARN_LIMIT,
.attr = "max",
@@ -2386,10 +2391,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
int ret;
+ mutex_lock(&data->update_lock);
ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+ mutex_unlock(&data->update_lock);
+
if (ret < 0)
return ret;
@@ -2400,11 +2409,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
+ int ret;
- return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
- PB_OPERATION_CONTROL_ON,
- enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_lock(&data->update_lock);
+ ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ PB_OPERATION_CONTROL_ON,
+ enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
}
static int pmbus_regulator_enable(struct regulator_dev *rdev)
@@ -2417,10 +2432,124 @@ static int pmbus_regulator_disable(struct regulator_dev *rdev)
return _pmbus_regulator_on_off(rdev, 0);
}
+/* A PMBus status flag and the corresponding REGULATOR_ERROR_* flag */
+struct pmbus_regulator_status_assoc {
+ int pflag, rflag;
+};
+
+/* PMBus->regulator bit mappings for a PMBus status register */
+struct pmbus_regulator_status_category {
+ int func;
+ int reg;
+ const struct pmbus_regulator_status_assoc *bits; /* zero-terminated */
+};
+
+static const struct pmbus_regulator_status_category pmbus_regulator_flag_map[] = {
+ {
+ .func = PMBUS_HAVE_STATUS_VOUT,
+ .reg = PMBUS_STATUS_VOUT,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_VOLTAGE_UV_WARNING, REGULATOR_ERROR_UNDER_VOLTAGE_WARN },
+ { PB_VOLTAGE_UV_FAULT, REGULATOR_ERROR_UNDER_VOLTAGE },
+ { PB_VOLTAGE_OV_WARNING, REGULATOR_ERROR_OVER_VOLTAGE_WARN },
+ { PB_VOLTAGE_OV_FAULT, REGULATOR_ERROR_REGULATION_OUT },
+ { },
+ },
+ }, {
+ .func = PMBUS_HAVE_STATUS_IOUT,
+ .reg = PMBUS_STATUS_IOUT,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_IOUT_OC_WARNING, REGULATOR_ERROR_OVER_CURRENT_WARN },
+ { PB_IOUT_OC_FAULT, REGULATOR_ERROR_OVER_CURRENT },
+ { PB_IOUT_OC_LV_FAULT, REGULATOR_ERROR_OVER_CURRENT },
+ { },
+ },
+ }, {
+ .func = PMBUS_HAVE_STATUS_TEMP,
+ .reg = PMBUS_STATUS_TEMPERATURE,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_TEMP_OT_WARNING, REGULATOR_ERROR_OVER_TEMP_WARN },
+ { PB_TEMP_OT_FAULT, REGULATOR_ERROR_OVER_TEMP },
+ { },
+ },
+ },
+};
+
+static int pmbus_regulator_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ int i, status;
+ const struct pmbus_regulator_status_category *cat;
+ const struct pmbus_regulator_status_assoc *bit;
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ u8 page = rdev_get_id(rdev);
+ int func = data->info->func[page];
+
+ *flags = 0;
+
+ mutex_lock(&data->update_lock);
+
+ for (i = 0; i < ARRAY_SIZE(pmbus_regulator_flag_map); i++) {
+ cat = &pmbus_regulator_flag_map[i];
+ if (!(func & cat->func))
+ continue;
+
+ status = pmbus_read_byte_data(client, page, cat->reg);
+ if (status < 0) {
+ mutex_unlock(&data->update_lock);
+ return status;
+ }
+
+ for (bit = cat->bits; bit->pflag; bit++) {
+ if (status & bit->pflag)
+ *flags |= bit->rflag;
+ }
+ }
+
+ /*
+ * Map what bits of STATUS_{WORD,BYTE} we can to REGULATOR_ERROR_*
+ * bits. Some of the other bits are tempting (especially for cases
+ * where we don't have the relevant PMBUS_HAVE_STATUS_*
+ * functionality), but there's an unfortunate ambiguity in that
+ * they're defined as indicating a fault *or* a warning, so we can't
+ * easily determine whether to report REGULATOR_ERROR_<foo> or
+ * REGULATOR_ERROR_<foo>_WARN.
+ */
+ status = pmbus_get_status(client, page, PMBUS_STATUS_WORD);
+ mutex_unlock(&data->update_lock);
+ if (status < 0)
+ return status;
+
+ if (pmbus_regulator_is_enabled(rdev) && (status & PB_STATUS_OFF))
+ *flags |= REGULATOR_ERROR_FAIL;
+
+ /*
+ * Unlike most other status bits, PB_STATUS_{IOUT_OC,VOUT_OV} are
+ * defined strictly as fault indicators (not warnings).
+ */
+ if (status & PB_STATUS_IOUT_OC)
+ *flags |= REGULATOR_ERROR_OVER_CURRENT;
+ if (status & PB_STATUS_VOUT_OV)
+ *flags |= REGULATOR_ERROR_REGULATION_OUT;
+
+ /*
+ * If we haven't discovered any thermal faults or warnings via
+ * PMBUS_STATUS_TEMPERATURE, map PB_STATUS_TEMPERATURE to a warning as
+ * a (conservative) best-effort interpretation.
+ */
+ if (!(*flags & (REGULATOR_ERROR_OVER_TEMP | REGULATOR_ERROR_OVER_TEMP_WARN)) &&
+ (status & PB_STATUS_TEMPERATURE))
+ *flags |= REGULATOR_ERROR_OVER_TEMP_WARN;
+
+ return 0;
+}
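Consumer drivers holding a struct regulator reference can query these bits through the existing regulator API declared in <linux/regulator/consumer.h>. A hypothetical consumer-side sketch, not part of this patch:

static bool example_vout_overcurrent(struct regulator *reg)
{
	unsigned int flags;

	if (regulator_get_error_flags(reg, &flags))
		return false;
	return flags & (REGULATOR_ERROR_OVER_CURRENT |
			REGULATOR_ERROR_OVER_CURRENT_WARN);
}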
+
const struct regulator_ops pmbus_regulator_ops = {
.enable = pmbus_regulator_enable,
.disable = pmbus_regulator_disable,
.is_enabled = pmbus_regulator_is_enabled,
+ .get_error_flags = pmbus_regulator_get_error_flags,
};
EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, PMBUS);
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index b07da06a40c9..18fffc5d749b 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -10,6 +10,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regulator/driver.h>
+
#include "pmbus.h"
#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
@@ -76,7 +78,22 @@ static int xdpe122_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
u8 vout_params;
- int i, ret;
+ int i, ret, vout_mode;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ switch (vout_mode >> 5) {
+ case 0:
+ info->format[PSC_VOLTAGE_OUT] = linear;
+ return 0;
+ case 1:
+ info->format[PSC_VOLTAGE_OUT] = vid;
+ info->read_word_data = xdpe122_read_word_data;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
for (i = 0; i < XDPE122_PAGE_NUM; i++) {
/* Read the register with VOUT scaling value.*/
@@ -107,10 +124,14 @@ static int xdpe122_identify(struct i2c_client *client,
return 0;
}
+static const struct regulator_desc xdpe122_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR("vout", 1),
+};
+
static struct pmbus_driver_info xdpe122_info = {
.pages = XDPE122_PAGE_NUM,
.format[PSC_VOLTAGE_IN] = linear,
- .format[PSC_VOLTAGE_OUT] = vid,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_IN] = linear,
.format[PSC_CURRENT_OUT] = linear,
@@ -124,7 +145,10 @@ static struct pmbus_driver_info xdpe122_info = {
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
.identify = xdpe122_identify,
- .read_word_data = xdpe122_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_XDPE122_REGULATOR)
+ .num_regulators = 2,
+ .reg_desc = xdpe122_reg_desc,
+#endif
};
static int xdpe122_probe(struct i2c_client *client)
@@ -140,6 +164,7 @@ static int xdpe122_probe(struct i2c_client *client)
}
static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe11280", 0},
{"xdpe12254", 0},
{"xdpe12284", 0},
{}
@@ -148,6 +173,7 @@ static const struct i2c_device_id xdpe122_id[] = {
MODULE_DEVICE_TABLE(i2c, xdpe122_id);
static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+ {.compatible = "infineon,xdpe11280"},
{.compatible = "infineon,xdpe12254"},
{.compatible = "infineon,xdpe12284"},
{}
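The vout_mode check added to xdpe122_identify() follows the PMBus VOUT_MODE encoding: bits [7:5] select the data format (000b linear and 001b VID here) and bits [4:0] carry the format parameter, which in linear mode is a 5-bit two's-complement exponent. A hedged sketch of how a LINEAR16 voltage word would be scaled with that exponent (the helper name and the microvolt unit are assumptions for illustration only):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: scale a raw LINEAR16 READ_VOUT word into microvolts. */
static s64 linear16_to_uV(u16 raw, u8 vout_mode)
{
	int exp = sign_extend32(vout_mode & 0x1f, 4);	/* VOUT_MODE[4:0] */
	s64 uv = (s64)raw * 1000000LL;

	return exp >= 0 ? uv << exp : uv >> -exp;
}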
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 9e086338dcba..f77dc6db31ac 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -22,6 +22,8 @@
#define ADC_STEP_MV 2
#define ADC_MAX_LOW_MEASUREMENT_MV 2000
+enum powr1xxx_chips { powr1014, powr1220 };
+
enum powr1220_regs {
VMON_STATUS0,
VMON_STATUS1,
@@ -74,6 +76,7 @@ enum powr1220_adc_values {
struct powr1220_data {
struct i2c_client *client;
struct mutex update_lock;
+ u8 max_channels;
bool adc_valid[MAX_POWR1220_ADC_VALUES];
/* the next value is in jiffies */
unsigned long adc_last_updated[MAX_POWR1220_ADC_VALUES];
@@ -111,7 +114,7 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->adc_last_updated[ch_num] + HZ) ||
- !data->adc_valid[ch_num]) {
+ !data->adc_valid[ch_num]) {
/*
* figure out if we need to use the attenuator for
* high inputs or inputs that we don't yet have a measurement
@@ -119,12 +122,12 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
* max reading.
*/
if (data->adc_maxes[ch_num] > ADC_MAX_LOW_MEASUREMENT_MV ||
- data->adc_maxes[ch_num] == 0)
+ data->adc_maxes[ch_num] == 0)
adc_range = 1 << 4;
/* set the attenuator and mux */
result = i2c_smbus_write_byte_data(data->client, ADC_MUX,
- adc_range | ch_num);
+ adc_range | ch_num);
if (result)
goto exit;
@@ -167,135 +170,116 @@ exit:
return result;
}
-/* Shows the voltage associated with the specified ADC channel */
-static ssize_t powr1220_voltage_show(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static umode_t
+powr1220_is_visible(const void *data, enum hwmon_sensor_types type,
+		    u32 attr, int channel)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- int adc_val = powr1220_read_adc(dev, attr->index);
-
- if (adc_val < 0)
- return adc_val;
+ struct powr1220_data *chip_data = (struct powr1220_data *)data;
+
+ if (channel >= chip_data->max_channels)
+ return 0;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- return sprintf(buf, "%d\n", adc_val);
+ return 0;
}
-/* Shows the maximum setting associated with the specified ADC channel */
-static ssize_t powr1220_max_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int
+powr1220_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- struct powr1220_data *data = dev_get_drvdata(dev);
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = input_names[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return sprintf(buf, "%d\n", data->adc_maxes[attr->index]);
+ return -EOPNOTSUPP;
}
-/* Shows the label associated with the specified ADC channel */
-static ssize_t powr1220_label_show(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static int
+powr1220_read(struct device *dev, enum hwmon_sensor_types type,
+	      u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct powr1220_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ ret = powr1220_read_adc(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ case hwmon_in_highest:
+ *val = data->adc_maxes[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+}
- return sprintf(buf, "%s\n", input_names[attr->index]);
+ return 0;
}
-static SENSOR_DEVICE_ATTR_RO(in0_input, powr1220_voltage, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_input, powr1220_voltage, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_input, powr1220_voltage, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_input, powr1220_voltage, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_input, powr1220_voltage, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_input, powr1220_voltage, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_input, powr1220_voltage, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_input, powr1220_voltage, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_input, powr1220_voltage, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_input, powr1220_voltage, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_input, powr1220_voltage, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_input, powr1220_voltage, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_input, powr1220_voltage, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_input, powr1220_voltage, VCCINP);
-
-static SENSOR_DEVICE_ATTR_RO(in0_highest, powr1220_max, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_highest, powr1220_max, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_highest, powr1220_max, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_highest, powr1220_max, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_highest, powr1220_max, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_highest, powr1220_max, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_highest, powr1220_max, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_highest, powr1220_max, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_highest, powr1220_max, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_highest, powr1220_max, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_highest, powr1220_max, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_highest, powr1220_max, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_highest, powr1220_max, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_highest, powr1220_max, VCCINP);
-
-static SENSOR_DEVICE_ATTR_RO(in0_label, powr1220_label, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_label, powr1220_label, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_label, powr1220_label, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_label, powr1220_label, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_label, powr1220_label, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_label, powr1220_label, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_label, powr1220_label, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_label, powr1220_label, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_label, powr1220_label, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_label, powr1220_label, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_label, powr1220_label, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_label, powr1220_label, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_label, powr1220_label, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_label, powr1220_label, VCCINP);
-
-static struct attribute *powr1220_attrs[] = {
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
- &sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
- &sensor_dev_attr_in12_input.dev_attr.attr,
- &sensor_dev_attr_in13_input.dev_attr.attr,
-
- &sensor_dev_attr_in0_highest.dev_attr.attr,
- &sensor_dev_attr_in1_highest.dev_attr.attr,
- &sensor_dev_attr_in2_highest.dev_attr.attr,
- &sensor_dev_attr_in3_highest.dev_attr.attr,
- &sensor_dev_attr_in4_highest.dev_attr.attr,
- &sensor_dev_attr_in5_highest.dev_attr.attr,
- &sensor_dev_attr_in6_highest.dev_attr.attr,
- &sensor_dev_attr_in7_highest.dev_attr.attr,
- &sensor_dev_attr_in8_highest.dev_attr.attr,
- &sensor_dev_attr_in9_highest.dev_attr.attr,
- &sensor_dev_attr_in10_highest.dev_attr.attr,
- &sensor_dev_attr_in11_highest.dev_attr.attr,
- &sensor_dev_attr_in12_highest.dev_attr.attr,
- &sensor_dev_attr_in13_highest.dev_attr.attr,
-
- &sensor_dev_attr_in0_label.dev_attr.attr,
- &sensor_dev_attr_in1_label.dev_attr.attr,
- &sensor_dev_attr_in2_label.dev_attr.attr,
- &sensor_dev_attr_in3_label.dev_attr.attr,
- &sensor_dev_attr_in4_label.dev_attr.attr,
- &sensor_dev_attr_in5_label.dev_attr.attr,
- &sensor_dev_attr_in6_label.dev_attr.attr,
- &sensor_dev_attr_in7_label.dev_attr.attr,
- &sensor_dev_attr_in8_label.dev_attr.attr,
- &sensor_dev_attr_in9_label.dev_attr.attr,
- &sensor_dev_attr_in10_label.dev_attr.attr,
- &sensor_dev_attr_in11_label.dev_attr.attr,
- &sensor_dev_attr_in12_label.dev_attr.attr,
- &sensor_dev_attr_in13_label.dev_attr.attr,
+static const struct hwmon_channel_info *powr1220_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL),
NULL
};
-ATTRIBUTE_GROUPS(powr1220);
+static const struct hwmon_ops powr1220_hwmon_ops = {
+ .read = powr1220_read,
+ .read_string = powr1220_read_string,
+ .is_visible = powr1220_is_visible,
+};
+
+static const struct hwmon_chip_info powr1220_chip_info = {
+ .ops = &powr1220_hwmon_ops,
+ .info = powr1220_info,
+};
+
+static const struct i2c_device_id powr1220_ids[];
static int powr1220_probe(struct i2c_client *client)
{
@@ -309,17 +293,30 @@ static int powr1220_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
+ switch (i2c_match_id(powr1220_ids, client)->driver_data) {
+ case powr1014:
+ data->max_channels = 10;
+ break;
+ default:
+ data->max_channels = 12;
+ break;
+ }
+
mutex_init(&data->update_lock);
data->client = client;
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data, powr1220_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
+ client->name,
+ data,
+ &powr1220_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id powr1220_ids[] = {
- { "powr1220", 0, },
+ { "powr1014", powr1014, },
+ { "powr1220", powr1220, },
{ }
};
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8f1b569c69e7..25fbbd4c9a2b 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -51,6 +52,9 @@ static const u16 SCH5627_REG_FAN[SCH5627_NO_FANS] = {
static const u16 SCH5627_REG_FAN_MIN[SCH5627_NO_FANS] = {
0x62, 0x64, 0x66, 0x68 };
+static const u16 SCH5627_REG_PWM_MAP[SCH5627_NO_FANS] = {
+ 0xA0, 0xA1, 0xA2, 0xA3 };
+
static const u16 SCH5627_REG_IN_MSB[SCH5627_NO_IN] = {
0x22, 0x23, 0x24, 0x25, 0x189 };
static const u16 SCH5627_REG_IN_LSN[SCH5627_NO_IN] = {
@@ -222,6 +226,9 @@ static int reg_to_rpm(u16 reg)
static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
int channel)
{
+ if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ return 0644;
+
return 0444;
}
@@ -277,6 +284,23 @@ static int sch5627_read(struct device *dev, enum hwmon_sensor_types type, u32 at
break;
}
break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ mutex_lock(&data->update_lock);
+ ret = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_PWM_MAP[channel]);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+ default:
+ break;
+ }
+ break;
case hwmon_in:
ret = sch5627_update_in(data);
if (ret < 0)
@@ -317,10 +341,42 @@ static int sch5627_read_string(struct device *dev, enum hwmon_sensor_types type,
return -EOPNOTSUPP;
}
+static int sch5627_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ /* registers are 8 bit wide */
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = sch56xx_write_virtual_reg(data->addr, SCH5627_REG_PWM_MAP[channel],
+ val);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct hwmon_ops sch5627_ops = {
.is_visible = sch5627_is_visible,
.read = sch5627_read,
.read_string = sch5627_read_string,
+ .write = sch5627_write,
};
static const struct hwmon_channel_info *sch5627_info[] = {
@@ -341,6 +397,12 @@ static const struct hwmon_channel_info *sch5627_info[] = {
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_FAULT
),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP
+ ),
HWMON_CHANNEL_INFO(in,
HWMON_I_INPUT | HWMON_I_LABEL,
HWMON_I_INPUT | HWMON_I_LABEL,
@@ -456,11 +518,20 @@ static int sch5627_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id sch5627_device_id[] = {
+ {
+ .name = "sch5627",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sch5627_device_id);
+
static struct platform_driver sch5627_driver = {
.driver = {
.name = DRVNAME,
},
.probe = sch5627_probe,
+ .id_table = sch5627_device_id,
};
module_platform_driver(sch5627_driver);
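The new hwmon_pwm_auto_channels_temp attribute is exposed by the hwmon core as pwm[1-4]_auto_channels_temp in the device's hwmon directory. A small user-space sketch of reading it back (the hwmon index in the path is an assumption; in practice resolve it via /sys/class/hwmon/hwmon*/name):

#include <stdio.h>

int main(void)
{
	/* hwmon3 is a placeholder index, not a fixed location. */
	FILE *f = fopen("/sys/class/hwmon/hwmon3/pwm1_auto_channels_temp", "r");
	unsigned int map;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &map) == 1)
		printf("fan1 temperature source map: 0x%x\n", map);
	fclose(f);
	return 0;
}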
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 39ff1c9b1df5..269757bc3a9e 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -501,12 +502,21 @@ error:
return err;
}
+static const struct platform_device_id sch5636_device_id[] = {
+ {
+ .name = "sch5636",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sch5636_device_id);
+
static struct platform_driver sch5636_driver = {
.driver = {
.name = DRVNAME,
},
.probe = sch5636_probe,
.remove = sch5636_remove,
+ .id_table = sch5636_device_id,
};
module_platform_driver(sch5636_driver);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 40cdadad35e5..3ece53adabd6 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -7,8 +7,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/acpi.h>
@@ -19,7 +21,10 @@
#include <linux/slab.h>
#include "sch56xx-common.h"
-/* Insmod parameters */
+static bool ignore_dmi;
+module_param(ignore_dmi, bool, 0);
+MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
@@ -134,7 +139,7 @@ static int sch56xx_send_cmd(u16 addr, u8 cmd, u16 reg, u8 v)
/* EM Interface Polling "Algorithm" */
for (i = 0; i < max_busy_polls + max_lazy_polls; i++) {
if (i >= max_busy_polls)
- msleep(1);
+ usleep_range(1000, 2000);
/* Read Interrupt source Register */
val = inb(addr + 8);
/* Write Clear the interrupt source bits */
@@ -422,7 +427,7 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
data->wddev.max_timeout = 255 * 60;
watchdog_set_nowayout(&data->wddev, nowayout);
if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
- set_bit(WDOG_ACTIVE, &data->wddev.status);
+ set_bit(WDOG_HW_RUNNING, &data->wddev.status);
/* Since the watchdog uses a downcounter there is no register to read
the BIOS set timeout from (if any was set at all) ->
@@ -518,11 +523,42 @@ static int __init sch56xx_device_add(int address, const char *name)
return PTR_ERR_OR_ZERO(sch56xx_pdev);
}
+/* For autoloading only */
+static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+
static int __init sch56xx_init(void)
{
- int address;
const char *name = NULL;
+ int address;
+ if (!ignore_dmi) {
+ if (!dmi_check_system(sch56xx_dmi_table))
+ return -ENODEV;
+
+ /*
+ * Some machines like the Esprimo P720 and Esprimo C700 have
+ * onboard devices named " Antiope"/" Theseus" instead of
+ * "Antiope"/"Theseus", so we need to check for both.
+ */
+ if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+ return -ENODEV;
+ }
+
+ /*
+ * Some devices like the Esprimo C700 have both onboard devices,
+ * so we still have to check manually
+ */
address = sch56xx_find(0x4e, &name);
if (address < 0)
address = sch56xx_find(0x2e, &name);
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 919877970ae3..5187c6dd5a4f 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -141,7 +141,6 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
- const struct of_device_id *of_id;
int idx, ret;
scpi_ops = get_scpi_ops();
@@ -171,12 +170,11 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
scpi_sensors->scpi_ops = scpi_ops;
- of_id = of_match_device(scpi_of_match, &pdev->dev);
- if (!of_id) {
+ scale = of_device_get_match_data(&pdev->dev);
+ if (!scale) {
dev_err(&pdev->dev, "Unable to initialize scpi-hwmon data\n");
return -ENODEV;
}
- scale = of_id->data;
for (i = 0, idx = 0; i < nr_sensors; i++) {
struct sensor_data *sensor = &scpi_sensors->data[idx];
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index a52ca72af120..54cd33d09688 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
#include <linux/util_macros.h>
enum tc654_regs {
@@ -379,28 +380,20 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", pwm);
}
-static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static int _set_pwm(struct tc654_data *data, unsigned long val)
{
- struct tc654_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- unsigned long val;
int ret;
- if (kstrtoul(buf, 10, &val))
- return -EINVAL;
- if (val > 255)
- return -EINVAL;
-
mutex_lock(&data->update_lock);
- if (val == 0)
+ if (val == 0) {
data->config |= TC654_REG_CONFIG_SDM;
- else
+ data->duty_cycle = 0;
+ } else {
data->config &= ~TC654_REG_CONFIG_SDM;
-
- data->duty_cycle = find_closest(val, tc654_pwm_map,
- ARRAY_SIZE(tc654_pwm_map));
+ data->duty_cycle = val - 1;
+ }
ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
if (ret < 0)
@@ -411,6 +404,24 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
out:
mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+ if (val > 0)
+ val = find_closest(val, tc654_pwm_map, ARRAY_SIZE(tc654_pwm_map)) + 1;
+
+ ret = _set_pwm(data, val);
return ret < 0 ? ret : count;
}
@@ -443,6 +454,58 @@ static struct attribute *tc654_attrs[] = {
ATTRIBUTE_GROUPS(tc654);
/*
+ * thermal cooling device functions
+ *
+ * Account for the "ShutDown Mode (SDM)" state by offsetting
+ * the 16 PWM duty cycle states by 1.
+ *
+ * State 0 = 0% PWM | Shutdown - Fan(s) are off
+ * State 1 = 30% PWM | duty_cycle = 0
+ * State 2 = ~35% PWM | duty_cycle = 1
+ * [...]
+ * State 15 = ~95% PWM | duty_cycle = 14
+ * State 16 = 100% PWM | duty_cycle = 15
+ */
+#define TC654_MAX_COOLING_STATE 16
+
+static int tc654_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ *state = TC654_MAX_COOLING_STATE;
+ return 0;
+}
+
+static int tc654_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_SDM)
+ *state = 0; /* FAN is off */
+ else
+ *state = data->duty_cycle + 1; /* offset PWM States by 1 */
+
+ return 0;
+}
+
+static int tc654_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return _set_pwm(data, clamp_val(state, 0, TC654_MAX_COOLING_STATE));
+}
+
+static const struct thermal_cooling_device_ops tc654_fan_cool_ops = {
+ .get_max_state = tc654_get_max_state,
+ .get_cur_state = tc654_get_cur_state,
+ .set_cur_state = tc654_set_cur_state,
+};
+
+/*
* device probe and removal
*/
@@ -472,7 +535,18 @@ static int tc654_probe(struct i2c_client *client)
hwmon_dev =
devm_hwmon_device_register_with_groups(dev, client->name, data,
tc654_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ struct thermal_cooling_device *cdev;
+
+ cdev = devm_thermal_of_cooling_device_register(dev, dev->of_node, client->name,
+ hwmon_dev, &tc654_fan_cool_ops);
+ return PTR_ERR_OR_ZERO(cdev);
+ }
+
+ return 0;
}
static const struct i2c_device_id tc654_id[] = {
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
new file mode 100644
index 000000000000..7814f39bd1a3
--- /dev/null
+++ b/drivers/hwmon/tmp464.c
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Driver for the Texas Instruments TMP464 SMBus temperature sensor IC.
+ * Supported models: TMP464, TMP468
+ *
+ * Copyright (C) 2022 Agathe Porte <agathe.porte@nokia.com>
+ * Preliminary support by:
+ * Lionel Pouliquen <lionel.lp.pouliquen@nokia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END };
+
+#define TMP464_NUM_CHANNELS 5 /* chan 0 is internal, 1-4 are remote */
+#define TMP468_NUM_CHANNELS 9 /* chan 0 is internal, 1-8 are remote */
+
+#define MAX_CHANNELS 9
+
+#define TMP464_TEMP_REG(channel) (channel)
+#define TMP464_TEMP_OFFSET_REG(channel) (0x40 + ((channel) - 1) * 8)
+#define TMP464_N_FACTOR_REG(channel) (0x41 + ((channel) - 1) * 8)
+
+static const u8 TMP464_THERM_LIMIT[MAX_CHANNELS] = {
+ 0x39, 0x42, 0x4A, 0x52, 0x5A, 0x62, 0x6a, 0x72, 0x7a };
+static const u8 TMP464_THERM2_LIMIT[MAX_CHANNELS] = {
+ 0x3A, 0x43, 0x4B, 0x53, 0x5B, 0x63, 0x6b, 0x73, 0x7b };
+
+#define TMP464_THERM_STATUS_REG 0x21
+#define TMP464_THERM2_STATUS_REG 0x22
+#define TMP464_REMOTE_OPEN_REG 0x23
+#define TMP464_CONFIG_REG 0x30
+#define TMP464_TEMP_HYST_REG 0x38
+#define TMP464_LOCK_REG 0xc4
+
+/* Identification */
+#define TMP464_MANUFACTURER_ID_REG 0xFE
+#define TMP464_DEVICE_ID_REG 0xFF
+
+/* Flags */
+#define TMP464_CONFIG_SHUTDOWN BIT(5)
+#define TMP464_CONFIG_RANGE 0x04
+#define TMP464_CONFIG_REG_REN(x) (BIT(7 + (x)))
+#define TMP464_CONFIG_REG_REN_MASK GENMASK(15, 7)
+#define TMP464_CONFIG_CONVERSION_RATE_B0 2
+#define TMP464_CONFIG_CONVERSION_RATE_B2 4
+#define TMP464_CONFIG_CONVERSION_RATE_MASK GENMASK(TMP464_CONFIG_CONVERSION_RATE_B2, \
+ TMP464_CONFIG_CONVERSION_RATE_B0)
+
+#define TMP464_UNLOCK_VAL 0xeb19
+#define TMP464_LOCK_VAL 0x5ca6
+#define TMP464_LOCKED 0x8000
+
+/* Manufacturer / Device ID's */
+#define TMP464_MANUFACTURER_ID 0x5449
+#define TMP464_DEVICE_ID 0x1468
+#define TMP468_DEVICE_ID 0x0468
+
+static const struct i2c_device_id tmp464_id[] = {
+ { "tmp464", TMP464_NUM_CHANNELS },
+ { "tmp468", TMP468_NUM_CHANNELS },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp464_id);
+
+static const struct of_device_id __maybe_unused tmp464_of_match[] = {
+ {
+ .compatible = "ti,tmp464",
+ .data = (void *)TMP464_NUM_CHANNELS
+ },
+ {
+ .compatible = "ti,tmp468",
+ .data = (void *)TMP468_NUM_CHANNELS
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tmp464_of_match);
+
+struct tmp464_channel {
+ const char *label;
+ bool enabled;
+};
+
+struct tmp464_data {
+ struct regmap *regmap;
+ struct mutex update_lock;
+ int channels;
+ s16 config_orig;
+ u16 open_reg;
+ unsigned long last_updated;
+ bool valid;
+ int update_interval;
+ struct tmp464_channel channel[MAX_CHANNELS];
+};
+
+static int temp_from_reg(s16 reg)
+{
+ return DIV_ROUND_CLOSEST((reg >> 3) * 625, 10);
+}
+
+static s16 temp_to_limit_reg(long temp)
+{
+ return DIV_ROUND_CLOSEST(temp, 500) << 6;
+}
+
+static s16 temp_to_offset_reg(long temp)
+{
+ return DIV_ROUND_CLOSEST(temp * 10, 625) << 3;
+}
+
+static int tmp464_enable_channels(struct tmp464_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ u16 enable = 0;
+ int i;
+
+ for (i = 0; i < data->channels; i++)
+ if (data->channel[i].enabled)
+ enable |= TMP464_CONFIG_REG_REN(i);
+
+ return regmap_update_bits(regmap, TMP464_CONFIG_REG, TMP464_CONFIG_REG_REN_MASK, enable);
+}
+
+static int tmp464_chip_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->update_interval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval, regval2;
+ int err = 0;
+
+ mutex_lock(&data->update_lock);
+
+ switch (attr) {
+ case hwmon_temp_max_alarm:
+ err = regmap_read(regmap, TMP464_THERM_STATUS_REG, &regval);
+ if (err < 0)
+ break;
+ *val = !!(regval & BIT(channel + 7));
+ break;
+ case hwmon_temp_crit_alarm:
+ err = regmap_read(regmap, TMP464_THERM2_STATUS_REG, &regval);
+ if (err < 0)
+ break;
+ *val = !!(regval & BIT(channel + 7));
+ break;
+ case hwmon_temp_fault:
+ /*
+ * The chip clears TMP464_REMOTE_OPEN_REG after it is read
+ * and only updates it after the next measurement cycle is
+ * complete. That means we have to cache the value internally
+ * for one measurement cycle and report the cached value.
+ */
+ if (!data->valid || time_after(jiffies, data->last_updated +
+ msecs_to_jiffies(data->update_interval))) {
+ err = regmap_read(regmap, TMP464_REMOTE_OPEN_REG, &regval);
+ if (err < 0)
+ break;
+ data->open_reg = regval;
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+ *val = !!(data->open_reg & BIT(channel + 7));
+ break;
+ case hwmon_temp_max_hyst:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
+ if (err < 0)
+ break;
+ regval -= regval2;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_max:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_crit_hyst:
+ err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
+ if (err < 0)
+ break;
+ regval -= regval2;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_crit:
+ err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_offset:
+ err = regmap_read(regmap, TMP464_TEMP_OFFSET_REG(channel), &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_input:
+ if (!data->channel[channel].enabled) {
+ err = -ENODATA;
+ break;
+ }
+ err = regmap_read(regmap, TMP464_TEMP_REG(channel), &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_enable:
+ *val = data->channel[channel].enabled;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static int tmp464_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tmp464_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return tmp464_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+
+ *str = data->channel[channel].label;
+
+ return 0;
+}
+
+static int tmp464_set_convrate(struct tmp464_data *data, long interval)
+{
+ int rate;
+
+ /*
+	 * For valid rates, the interval in milliseconds can be calculated as
+ * interval = 125 << (7 - rate);
+ * or
+ * interval = (1 << (7 - rate)) * 125;
+ * The rate is therefore
+ * rate = 7 - __fls(interval / 125);
+ * and the rounded rate is
+ * rate = 7 - __fls(interval * 4 / (125 * 3));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ interval = clamp_val(interval, 125, 16000);
+ rate = 7 - __fls(interval * 4 / (125 * 3));
+ data->update_interval = 125 << (7 - rate);
+
+ return regmap_update_bits(data->regmap, TMP464_CONFIG_REG,
+ TMP464_CONFIG_CONVERSION_RATE_MASK,
+ rate << TMP464_CONFIG_CONVERSION_RATE_B0);
+}
+
+static int tmp464_chip_write(struct tmp464_data *data, u32 attr, int channel, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return tmp464_set_convrate(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_temp_write(struct tmp464_data *data, u32 attr, int channel, long val)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int err = 0;
+
+ switch (attr) {
+ case hwmon_temp_max_hyst:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[0], &regval);
+ if (err < 0)
+ break;
+ val = clamp_val(val, -256000, 256000); /* prevent overflow/underflow */
+ val = clamp_val(temp_from_reg(regval) - val, 0, 255000);
+ err = regmap_write(regmap, TMP464_TEMP_HYST_REG,
+ DIV_ROUND_CLOSEST(val, 1000) << 7);
+ break;
+ case hwmon_temp_max:
+ val = temp_to_limit_reg(clamp_val(val, -255000, 255500));
+ err = regmap_write(regmap, TMP464_THERM_LIMIT[channel], val);
+ break;
+ case hwmon_temp_crit:
+ val = temp_to_limit_reg(clamp_val(val, -255000, 255500));
+ err = regmap_write(regmap, TMP464_THERM2_LIMIT[channel], val);
+ break;
+ case hwmon_temp_offset:
+ val = temp_to_offset_reg(clamp_val(val, -128000, 127937));
+ err = regmap_write(regmap, TMP464_TEMP_OFFSET_REG(channel), val);
+ break;
+ case hwmon_temp_enable:
+ data->channel[channel].enabled = !!val;
+ err = tmp464_enable_channels(data);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int tmp464_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+ int err;
+
+ mutex_lock(&data->update_lock);
+
+ switch (type) {
+ case hwmon_chip:
+ err = tmp464_chip_write(data, attr, channel, val);
+ break;
+ case hwmon_temp:
+ err = tmp464_temp_write(data, attr, channel, val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static umode_t tmp464_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct tmp464_data *data = _data;
+
+ if (channel >= data->channels)
+ return 0;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return 0644;
+ return 0;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_max_hyst:
+ if (!channel)
+ return 0644;
+ return 0444;
+ case hwmon_temp_label:
+ if (data->channel[channel].label)
+ return 0444;
+ return 0;
+ case hwmon_temp_fault:
+ if (channel)
+ return 0444;
+ return 0;
+ case hwmon_temp_offset:
+ if (channel)
+ return 0644;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void tmp464_restore_lock(void *regmap)
+{
+ regmap_write(regmap, TMP464_LOCK_REG, TMP464_LOCK_VAL);
+}
+
+static void tmp464_restore_config(void *_data)
+{
+ struct tmp464_data *data = _data;
+
+ regmap_write(data->regmap, TMP464_CONFIG_REG, data->config_orig);
+}
+
+static int tmp464_init_client(struct device *dev, struct tmp464_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int err;
+
+ err = regmap_read(regmap, TMP464_LOCK_REG, &regval);
+ if (err)
+ return err;
+ if (regval == TMP464_LOCKED) {
+ /* Explicitly unlock chip if it is locked */
+ err = regmap_write(regmap, TMP464_LOCK_REG, TMP464_UNLOCK_VAL);
+ if (err)
+ return err;
+ /* and lock it again when unloading the driver */
+ err = devm_add_action_or_reset(dev, tmp464_restore_lock, regmap);
+ if (err)
+ return err;
+ }
+
+ err = regmap_read(regmap, TMP464_CONFIG_REG, &regval);
+ if (err)
+ return err;
+ data->config_orig = regval;
+ err = devm_add_action_or_reset(dev, tmp464_restore_config, data);
+ if (err)
+ return err;
+
+ /* Default to 500 ms update interval */
+ err = regmap_update_bits(regmap, TMP464_CONFIG_REG,
+ TMP464_CONFIG_CONVERSION_RATE_MASK | TMP464_CONFIG_SHUTDOWN,
+ BIT(TMP464_CONFIG_CONVERSION_RATE_B0) |
+ BIT(TMP464_CONFIG_CONVERSION_RATE_B2));
+ if (err)
+ return err;
+
+ data->update_interval = 500;
+
+ return tmp464_enable_channels(data);
+}
+
+static int tmp464_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ char *name, *chip;
+ int reg;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ reg = i2c_smbus_read_word_swapped(client, TMP464_MANUFACTURER_ID_REG);
+ if (reg < 0)
+ return reg;
+ if (reg != TMP464_MANUFACTURER_ID)
+ return -ENODEV;
+
+ /* Check for "always return zero" bits */
+ reg = i2c_smbus_read_word_swapped(client, TMP464_THERM_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if (reg & 0x1f)
+ return -ENODEV;
+ reg = i2c_smbus_read_word_swapped(client, TMP464_THERM2_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if (reg & 0x1f)
+ return -ENODEV;
+
+ reg = i2c_smbus_read_word_swapped(client, TMP464_DEVICE_ID_REG);
+ if (reg < 0)
+ return reg;
+ switch (reg) {
+ case TMP464_DEVICE_ID:
+ name = "tmp464";
+ chip = "TMP464";
+ break;
+ case TMP468_DEVICE_ID:
+ name = "tmp468";
+ chip = "TMP468";
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ strscpy(info->type, name, I2C_NAME_SIZE);
+ dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n", chip, client->addr);
+
+ return 0;
+}
+
+static int tmp464_probe_child_from_dt(struct device *dev,
+ struct device_node *child,
+ struct tmp464_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ u32 channel;
+ s32 nfactor;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &channel);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (channel >= data->channels) {
+ dev_err(dev, "invalid reg %d of %pOFn\n", channel, child);
+ return -EINVAL;
+ }
+
+ of_property_read_string(child, "label", &data->channel[channel].label);
+
+ data->channel[channel].enabled = of_device_is_available(child);
+
+ err = of_property_read_s32(child, "ti,n-factor", &nfactor);
+ if (err && err != -EINVAL)
+ return err;
+ if (!err) {
+ if (channel == 0) {
+ dev_err(dev, "n-factor can't be set for internal channel\n");
+ return -EINVAL;
+ }
+ if (nfactor > 127 || nfactor < -128) {
+ dev_err(dev, "n-factor for channel %d invalid (%d)\n",
+ channel, nfactor);
+ return -EINVAL;
+ }
+ err = regmap_write(regmap, TMP464_N_FACTOR_REG(channel),
+ (nfactor << 8) & 0xff00);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tmp464_probe_from_dt(struct device *dev, struct tmp464_data *data)
+{
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int err;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = tmp464_probe_child_from_dt(dev, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops tmp464_ops = {
+ .is_visible = tmp464_is_visible,
+ .read = tmp464_read,
+ .read_string = tmp464_read_string,
+ .write = tmp464_write,
+};
+
+static const struct hwmon_channel_info *tmp464_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE),
+ NULL
+};
+
+static const struct hwmon_chip_info tmp464_chip_info = {
+ .ops = &tmp464_ops,
+ .info = tmp464_info,
+};
+
+/* regmap */
+
+static bool tmp464_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (reg < TMP464_TEMP_REG(TMP468_NUM_CHANNELS) ||
+ reg == TMP464_THERM_STATUS_REG ||
+ reg == TMP464_THERM2_STATUS_REG ||
+ reg == TMP464_REMOTE_OPEN_REG);
+}
+
+static const struct regmap_config tmp464_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP464_DEVICE_ID_REG,
+ .volatile_reg = tmp464_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int tmp464_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct tmp464_data *data;
+ int i, err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "i2c functionality check failed\n");
+ return -ENODEV;
+ }
+ data = devm_kzalloc(dev, sizeof(struct tmp464_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->update_lock);
+
+ if (dev->of_node)
+ data->channels = (int)(unsigned long)of_device_get_match_data(&client->dev);
+ else
+ data->channels = i2c_match_id(tmp464_id, client)->driver_data;
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp464_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ for (i = 0; i < data->channels; i++)
+ data->channel[i].enabled = true;
+
+ err = tmp464_init_client(dev, data);
+ if (err)
+ return err;
+
+ if (dev->of_node) {
+ err = tmp464_probe_from_dt(dev, data);
+ if (err)
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &tmp464_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver tmp464_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp464",
+ .of_match_table = of_match_ptr(tmp464_of_match),
+ },
+ .probe_new = tmp464_probe,
+ .id_table = tmp464_id,
+ .detect = tmp464_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(tmp464_driver);
+
+MODULE_AUTHOR("Agathe Porte <agathe.porte@nokia.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP464 temperature sensor driver");
+MODULE_LICENSE("GPL");
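The tmp464 conversion helpers above assume the chip's 0.0625 degree Celsius resolution with temperatures left-justified in a 16-bit word, and limits held at 0.5 degree resolution. A worked check of the arithmetic, with values chosen purely for illustration:

/*
 * temp_from_reg(0x0C80): 0x0C80 >> 3 = 400 steps of 0.0625 degC,
 *                        400 * 625 / 10 = 25000 millidegrees C.
 * temp_to_limit_reg(25000): DIV_ROUND_CLOSEST(25000, 500) << 6
 *                        = 50 << 6 = 0x0C80, i.e. the same 25 degC.
 * tmp464_set_convrate(500): clamp to 500, 500 * 4 / 375 = 5, __fls(5) = 2,
 *                        rate = 7 - 2 = 5, update_interval = 125 << 2 = 500 ms.
 */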
diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c
index 44d798be3d59..2ac5fb96bba4 100644
--- a/drivers/hwmon/vexpress-hwmon.c
+++ b/drivers/hwmon/vexpress-hwmon.c
@@ -207,7 +207,6 @@ MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
static int vexpress_hwmon_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct vexpress_hwmon_data *data;
const struct vexpress_hwmon_type *type;
@@ -216,10 +215,9 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, data);
- match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
- if (!match)
+ type = of_device_get_match_data(&pdev->dev);
+ if (!type)
return -ENODEV;
- type = match->data;
data->reg = devm_regmap_init_vexpress_config(&pdev->dev);
if (IS_ERR(data->reg))
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 432ade0842f6..70a07b4e9967 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -658,13 +658,11 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
- size_t hw_sz = sizeof(struct msc_block_desc) -
- offsetof(struct msc_block_desc, hw_tag);
for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
struct msc_block_desc *bdesc = sg_virt(sg);
- memset(&bdesc->hw_tag, 0, hw_sz);
+ memset_startat(bdesc, 0, hw_tag);
}
}
}
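memset_startat(), declared in <linux/string.h>, zeroes a structure from the named member through the end of the structure, which is why the open-coded hw_sz computation above can go away. A rough sketch of the semantics (not the exact kernel macro):

#include <linux/stddef.h>
#include <linux/string.h>

/* Equivalent effect for the msu.c change: clear hw_tag and everything after it. */
#define memset_startat_sketch(obj, v, member) \
	memset(&(obj)->member, (v), \
	       sizeof(*(obj)) - offsetof(typeof(*(obj)), member))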
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 42da31c1ab70..8a6c6ee28556 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,7 +488,7 @@ config I2C_BRCMSTB
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64 || XTENSA
+ depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -680,7 +680,7 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on
@@ -935,7 +935,7 @@ config I2C_QCOM_GENI
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Qualcomm SoCs.
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index dfc534065595..5149454eef4a 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -23,6 +23,11 @@
#define BCM2835_I2C_FIFO 0x10
#define BCM2835_I2C_DIV 0x14
#define BCM2835_I2C_DEL 0x18
+/*
+ * 16-bit field for the number of SCL cycles to wait after rising SCL
+ * before deciding the slave is not responding. 0 disables the
+ * timeout detection.
+ */
#define BCM2835_I2C_CLKT 0x1c
#define BCM2835_I2C_C_READ BIT(0)
@@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->quirks = of_device_get_match_data(&pdev->dev);
+ /*
+ * Disable the hardware clock stretching timeout. SMBUS
+ * specifies a limit for how long the device can stretch the
+ * clock, but core I2C doesn't.
+ */
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
ret = i2c_add_adapter(adap);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 490ee3962645..b00f35c0b066 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
/* set the data in/out register size for compatible SoCs */
if (of_device_is_compatible(dev->device->of_node,
- "brcmstb,brcmper-i2c"))
+ "brcm,brcmper-i2c"))
dev->data_regsz = sizeof(u8);
else
dev->data_regsz = sizeof(u32);
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index c1de8eb66169..cf54f1cb4c57 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].adap.quirks = &cci->data->quirks;
cci->master[idx].adap.algo = &cci_algo;
cci->master[idx].adap.dev.parent = dev;
- cci->master[idx].adap.dev.of_node = child;
+ cci->master[idx].adap.dev.of_node = of_node_get(child);
cci->master[idx].master = idx;
cci->master[idx].cci = cci;
@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
continue;
ret = i2c_add_adapter(&cci->master[i].adap);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cci->master[i].adap.dev.of_node);
goto error_i2c;
+ }
}
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
return 0;
error_i2c:
- for (; i >= 0; i--) {
- if (cci->master[i].cci)
+ for (--i ; i >= 0; i--) {
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
}
error:
disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
int i;
for (i = 0; i < cci->data->num_masters; i++) {
- if (cci->master[i].cci)
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
cci_halt(cci, i);
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0b66e25c0e2d..b7640cfe0020 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -64,6 +64,7 @@ static struct cpuidle_driver intel_idle_driver = {
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int disabled_states_mask;
+static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
@@ -121,9 +122,6 @@ static unsigned int mwait_substates __initdata;
* If the local APIC timer is not known to be reliable in the target idle state,
* enable one-shot tick broadcasting for the target CPU before executing MWAIT.
*
- * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
- * flushing user TLBs.
- *
* Must be called under local_irq_disable().
*/
static __cpuidle int intel_idle(struct cpuidle_device *dev,
@@ -761,6 +759,46 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice
+ * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in
+ * MSR_IA32_POWER_CTL. But in this case there is effectively no C1, because C1
+ * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then
+ * both C1 and C1E requests end up with C1, so there is effectively no C1E.
+ *
+ * By default we enable C1 and disable C1E by marking it with
+ * 'CPUIDLE_FLAG_UNUSABLE'.
+ */
+static struct cpuidle_state spr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE |
+ CPUIDLE_FLAG_UNUSABLE,
+ .exit_latency = 2,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 290,
+ .target_residency = 800,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1104,6 +1142,12 @@ static const struct idle_cpu idle_cpu_icx __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_spr __initconst = {
+ .state_table = spr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1166,6 +1210,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
@@ -1353,6 +1398,8 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static void c1e_promotion_enable(void);
+
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@@ -1523,6 +1570,41 @@ static void __init skx_idle_state_table_update(void)
}
}
+/**
+ * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
+ */
+static void __init spr_idle_state_table_update(void)
+{
+ unsigned long long msr;
+
+ /* Check if user prefers C1E over C1. */
+ if (preferred_states_mask & BIT(2)) {
+ if (preferred_states_mask & BIT(1))
+ /* Both can't be enabled, stick to the defaults. */
+ return;
+
+ spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
+ spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
+
+ /* Enable C1E using the "C1E promotion" bit. */
+ c1e_promotion_enable();
+ disable_promotion_to_c1e = false;
+ }
+
+ /*
+ * By default, the C6 state assumes the worst-case scenario of package
+ * C6. However, if PC6 is disabled, we update the numbers to match
+ * core C6.
+ */
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+
+ /* Limit value 2 and above allow for PC6. */
+ if ((msr & 0x7) < 2) {
+ spr_cstates[2].exit_latency = 190;
+ spr_cstates[2].target_residency = 600;
+ }
+}
+
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
@@ -1557,6 +1639,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
case INTEL_FAM6_SKYLAKE_X:
skx_idle_state_table_update();
break;
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ spr_idle_state_table_update();
+ break;
}
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
@@ -1629,6 +1714,15 @@ static void auto_demotion_disable(void)
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
+static void c1e_promotion_enable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits |= 0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
+
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
@@ -1798,3 +1892,14 @@ module_param(max_cstate, int, 0444);
*/
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
+/*
+ * Some platforms come with mutually exclusive C-states, so that if one is
+ * enabled, the other C-states must not be used. Example: C1 and C1E on
+ * Sapphire Rapids platform. This parameter allows for selecting the
+ * preferred C-states among the groups of mutually exclusive C-states - the
+ * selected C-states will be registered, the other C-states from the mutually
+ * exclusive group won't be registered. If the platform has no mutually
+ * exclusive C-states, this parameter has no effect.
+ */
+module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
+MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
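Taken together with spr_idle_state_table_update() above, the mask is interpreted per C-state: BIT(1) stands for C1 and BIT(2) for C1E. A worked reading of the two interesting cases, shown with the kernel command-line form of the parameter:

/*
 * intel_idle.preferred_cstates=4  -> BIT(2) set, BIT(1) clear:
 *     C1 gains CPUIDLE_FLAG_UNUSABLE, C1E loses it, and the
 *     C1E-promotion bit in MSR_IA32_POWER_CTL is set.
 * intel_idle.preferred_cstates=6  -> both bits set:
 *     the defaults are kept (C1 usable, C1E marked unusable).
 */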
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index 9f622e37477b..9040a717b247 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -87,11 +87,9 @@ static int bma400_spi_probe(struct spi_device *spi)
return bma400_probe(&spi->dev, regmap, id->name);
}
-static int bma400_spi_remove(struct spi_device *spi)
+static void bma400_spi_remove(struct spi_device *spi)
{
bma400_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bma400_spi_ids[] = {
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index e6081dd0a880..d11f668016a6 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "Unable to register iio device\n");
- goto err_trigger_unregister;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_trigger_unregister:
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 11559567cb39..80007cc2d044 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -35,11 +35,9 @@ static int bmc150_accel_probe(struct spi_device *spi)
true);
}
-static int bmc150_accel_remove(struct spi_device *spi)
+static void bmc150_accel_remove(struct spi_device *spi)
{
bmc150_accel_core_remove(&spi->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 758ad2f12896..06d99d9949f3 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -56,11 +56,9 @@ static int bmi088_accel_probe(struct spi_device *spi)
true);
}
-static int bmi088_accel_remove(struct spi_device *spi)
+static void bmi088_accel_remove(struct spi_device *spi)
{
bmi088_accel_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmi088_accel_id[] = {
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 32989d91b982..f7fd9e046588 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -173,12 +173,20 @@ struct fxls8962af_data {
u16 upper_thres;
};
-const struct regmap_config fxls8962af_regmap_conf = {
+const struct regmap_config fxls8962af_i2c_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = FXLS8962AF_MAX_REG,
};
-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
+EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
+
+const struct regmap_config fxls8962af_spi_regmap_conf = {
+ .reg_bits = 8,
+ .pad_bits = 8,
+ .val_bits = 8,
+ .max_register = FXLS8962AF_MAX_REG,
+};
+EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
enum {
fxls8962af_idx_x,
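The regmap table is split because of the new .pad_bits field: on SPI this device returns a dummy byte between the register address and the data, while the I2C path needs no padding. A sketch of the read framing regmap derives from reg_bits = 8, pad_bits = 8, val_bits = 8 (the byte layout shown is an illustration, not taken from the datasheet):

/*
 *   controller sends : <reg addr> <dummy>  <dummy>
 *   device answers   :            <pad>    <value>
 *
 * with .pad_bits = 0, as on I2C, the value would follow the address directly.
 */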
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index cfb004b20455..6bde9891effb 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
{
struct regmap *regmap;
- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to initialize i2c regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
index 57108d3d480b..6f4dff3238d3 100644
--- a/drivers/iio/accel/fxls8962af-spi.c
+++ b/drivers/iio/accel/fxls8962af-spi.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
{
struct regmap *regmap;
- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to initialize spi regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index b67572c3ef06..9cbe98c3ba9a 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
int fxls8962af_core_remove(struct device *dev);
extern const struct dev_pm_ops fxls8962af_pm_ops;
-extern const struct regmap_config fxls8962af_regmap_conf;
+extern const struct regmap_config fxls8962af_i2c_regmap_conf;
+extern const struct regmap_config fxls8962af_spi_regmap_conf;
#endif /* _FXLS8962AF_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 0fe570316848..ac74cdcd2bc8 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 441e6b764281..57c451cfb9e5 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -32,11 +32,9 @@ static int kxsd9_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->name);
}
-static int kxsd9_spi_remove(struct spi_device *spi)
+static void kxsd9_spi_remove(struct spi_device *spi)
{
kxsd9_common_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id kxsd9_spi_id[] = {
diff --git a/drivers/iio/accel/mma7455_spi.c b/drivers/iio/accel/mma7455_spi.c
index ecf690692dcc..b746031551a3 100644
--- a/drivers/iio/accel/mma7455_spi.c
+++ b/drivers/iio/accel/mma7455_spi.c
@@ -22,11 +22,9 @@ static int mma7455_spi_probe(struct spi_device *spi)
return mma7455_core_probe(&spi->dev, regmap, id->name);
}
-static int mma7455_spi_remove(struct spi_device *spi)
+static void mma7455_spi_remove(struct spi_device *spi)
{
mma7455_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id mma7455_spi_ids[] = {
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 4c359fb05480..c53a3398b14c 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 0570ab1cc064..5ff6bc70708b 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
return ret;
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 43ecacbdc95a..83c81072511e 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1524,7 +1524,7 @@ error_ret:
return ret;
}
-static int sca3000_remove(struct spi_device *spi)
+static void sca3000_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct sca3000_state *st = iio_priv(indio_dev);
@@ -1535,8 +1535,6 @@ static int sca3000_remove(struct spi_device *spi)
sca3000_stop_all_interrupts(st);
if (spi->irq)
free_irq(spi->irq, indio_dev);
-
- return 0;
}
static const struct spi_device_id sca3000_id[] = {
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index bc2cfa5f9592..b400bbe291aa 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -76,7 +76,7 @@
#define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
#define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
#define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6)
+#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
#define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
/* AD7124_FILTER_X */
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 1d345d66742d..c17d9b5fbaf6 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -479,7 +479,7 @@ error_disable_reg:
return ret;
}
-static int ad7266_remove(struct spi_device *spi)
+static void ad7266_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7266_state *st = iio_priv(indio_dev);
@@ -488,8 +488,6 @@ static int ad7266_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad7266_id[] = {
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index dd956a7c216e..5a55f79f2574 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -78,13 +78,11 @@ static int ltc2496_probe(struct spi_device *spi)
return ltc2497core_probe(dev, indio_dev);
}
-static int ltc2496_remove(struct spi_device *spi)
+static void ltc2496_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
ltc2497core_remove(indio_dev);
-
- return 0;
}
static const struct of_device_id ltc2496_of_match[] = {
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d1cff28cae0..b4c69acb33e3 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -459,15 +459,13 @@ reg_disable:
return ret;
}
-static int mcp320x_remove(struct spi_device *spi)
+static void mcp320x_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp320x *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(adc->reg);
-
- return 0;
}
static const struct of_device_id mcp320x_dt_ids[] = {
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 13535f148c4c..1cb4590fe412 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -321,7 +321,7 @@ reg_disable:
return ret;
}
-static int mcp3911_remove(struct spi_device *spi)
+static void mcp3911_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp3911 *adc = iio_priv(indio_dev);
@@ -331,8 +331,6 @@ static int mcp3911_remove(struct spi_device *spi)
clk_disable_unprepare(adc->clki);
if (adc->vref)
regulator_disable(adc->vref);
-
- return 0;
}
static const struct of_device_id mcp3911_dt_ids[] = {
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 42ea8bc7e780..adc5ceaef8c9 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
struct z188_adc *adc;
struct iio_dev *indio_dev;
struct resource *mem;
+ int ret;
indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
if (!indio_dev)
@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
adc->mem = mem;
mcb_set_drvdata(dev, indio_dev);
- return iio_device_register(indio_dev);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+err_unmap:
+ iounmap(adc->base);
err:
mcb_release_mem(mem);
return -ENXIO;
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 6eb62b564dae..59d75d09604f 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -503,7 +503,7 @@ err_clk_disable:
return ret;
}
-static int adc12138_remove(struct spi_device *spi)
+static void adc12138_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adc12138 *adc = iio_priv(indio_dev);
@@ -514,8 +514,6 @@ static int adc12138_remove(struct spi_device *spi)
regulator_disable(adc->vref_n);
regulator_disable(adc->vref_p);
clk_disable_unprepare(adc->cclk);
-
- return 0;
}
static const struct of_device_id adc12138_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a7efa3eada2c..e3658b969c5b 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -662,7 +662,7 @@ error_destroy_mutex:
return ret;
}
-static int ti_ads7950_remove(struct spi_device *spi)
+static void ti_ads7950_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_ads7950_state *st = iio_priv(indio_dev);
@@ -672,8 +672,6 @@ static int ti_ads7950_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
mutex_destroy(&st->slock);
-
- return 0;
}
static const struct spi_device_id ti_ads7950_id[] = {
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 2e24717d7f55..22c2583eedd0 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -479,7 +479,7 @@ err_regulator_disable:
return ret;
}
-static int ads8688_remove(struct spi_device *spi)
+static void ads8688_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ads8688_state *st = iio_priv(indio_dev);
@@ -489,8 +489,6 @@ static int ads8688_remove(struct spi_device *spi)
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ads8688_id[] = {
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 403b787f9f7e..2406eda9dfc6 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -224,7 +224,7 @@ error_disable_reg:
return ret;
}
-static int tlc4541_remove(struct spi_device *spi)
+static void tlc4541_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct tlc4541_state *st = iio_priv(indio_dev);
@@ -232,8 +232,6 @@ static int tlc4541_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
-
- return 0;
}
static const struct of_device_id tlc4541_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index d84ae6b008c1..e8fc4d01f30b 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
mutex_lock(&priv->slock);
size = 0;
- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
+ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
tsc2046_adc_group_set_cmd(priv, group, ch_idx);
group++;
@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
* enabled.
*/
size = 0;
- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
+ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 5271073bb74e..acd230a6af35 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -134,7 +134,6 @@ struct ad74413r_state {
#define AD74413R_CH_EN_MASK(x) BIT(x)
#define AD74413R_REG_DIN_COMP_OUT 0x25
-#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x
#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x))
#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0)
@@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
unsigned int offset = 0;
int ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->gpo_gpio_offsets[offset];
ret = ad74413r_set_gpo_config(st, real_offset,
@@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset);
+ status &= BIT(real_offset);
return status ? 1 : 0;
}
@@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
if (ret)
return ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->comp_gpio_offsets[offset];
- if (val & BIT(real_offset))
- *bits |= offset;
+ __assign_bit(offset, bits, val & BIT(real_offset));
}
return ret;
@@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
{
struct ad74413r_state *st = iio_priv(indio_dev);
struct spi_transfer *xfer = st->adc_samples_xfer;
- u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+ u8 *rx_buf = st->adc_samples_buf.rx_buf;
u8 *tx_buf = st->adc_samples_tx_buf;
unsigned int channel;
int ret = -EINVAL;
@@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
spi_message_add_tail(xfer, &st->adc_samples_msg);
- xfer++;
tx_buf += AD74413R_FRAME_SIZE;
- rx_buf += AD74413R_FRAME_SIZE;
+ if (xfer != st->adc_samples_xfer)
+ rx_buf += AD74413R_FRAME_SIZE;
+ xfer++;
}
xfer->rx_buf = rx_buf;
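The gpio_get_multiple fix above matters because the old code OR-ed the bit position (offset) into *bits instead of setting the corresponding bit, and could never clear one. __assign_bit() handles both directions; a tiny illustration with hypothetical values:

DECLARE_BITMAP(bits, 8);	/* hypothetical result bitmap */

__assign_bit(3, bits, true);	/* sets bit 3 */
__assign_bit(3, bits, false);	/* clears bit 3 again - a plain |= could not */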
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index cfcf18a0bce8..1134ae12e531 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -298,7 +298,7 @@ error_disable_reg:
return ret;
}
-static int ad8366_remove(struct spi_device *spi)
+static void ad8366_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8366_state *st = iio_priv(indio_dev);
@@ -308,8 +308,6 @@ static int ad8366_remove(struct spi_device *spi)
if (!IS_ERR(reg))
regulator_disable(reg);
-
- return 0;
}
static const struct spi_device_id ad8366_id[] = {
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 1aee87100038..eafaf4529df5 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -586,7 +586,7 @@ err_setup_irq:
return ret;
}
-static int ssp_remove(struct spi_device *spi)
+static void ssp_remove(struct spi_device *spi)
{
struct ssp_data *data = spi_get_drvdata(spi);
@@ -608,8 +608,6 @@ static int ssp_remove(struct spi_device *spi)
mutex_destroy(&data->pending_lock);
mfd_remove_devices(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 2d3b14c407d8..ecbc6a51d60f 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -521,7 +521,7 @@ error_free_channels:
return ret;
}
-static int ad5360_remove(struct spi_device *spi)
+static void ad5360_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5360_state *st = iio_priv(indio_dev);
@@ -531,8 +531,6 @@ static int ad5360_remove(struct spi_device *spi)
kfree(indio_dev->channels);
regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5360_ids[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index e38860a6a9f3..82e1d9bd773e 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -488,11 +488,9 @@ static int ad5380_spi_probe(struct spi_device *spi)
return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
}
-static int ad5380_spi_remove(struct spi_device *spi)
+static void ad5380_spi_remove(struct spi_device *spi)
{
ad5380_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5380_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 1c9b54c012a7..14cfabacbea5 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -491,11 +491,9 @@ static int ad5446_spi_probe(struct spi_device *spi)
&ad5446_spi_chip_info[id->driver_data]);
}
-static int ad5446_spi_remove(struct spi_device *spi)
+static void ad5446_spi_remove(struct spi_device *spi)
{
ad5446_remove(&spi->dev);
-
- return 0;
}
static struct spi_driver ad5446_spi_driver = {
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index f5e93c6acc9d..bad9bdaafa94 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -330,7 +330,7 @@ error_disable_reg:
return ret;
}
-static int ad5449_spi_remove(struct spi_device *spi)
+static void ad5449_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5449 *st = iio_priv(indio_dev);
@@ -338,8 +338,6 @@ static int ad5449_spi_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5449_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index b631261efa97..8507573aa13e 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -336,7 +336,7 @@ error_disable_reg:
return ret;
}
-static int ad5504_remove(struct spi_device *spi)
+static void ad5504_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5504_state *st = iio_priv(indio_dev);
@@ -345,8 +345,6 @@ static int ad5504_remove(struct spi_device *spi)
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad5504_id[] = {
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 6bfd7951e18c..0f7abfa75bec 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -130,11 +130,9 @@ static int ad5592r_spi_probe(struct spi_device *spi)
return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops);
}
-static int ad5592r_spi_remove(struct spi_device *spi)
+static void ad5592r_spi_remove(struct spi_device *spi)
{
ad5592r_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5592r_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 3c98941b9f99..371e812850eb 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -293,7 +293,7 @@ error_disable_reg:
return ret;
}
-static int ad5624r_remove(struct spi_device *spi)
+static void ad5624r_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5624r_state *st = iio_priv(indio_dev);
@@ -301,8 +301,6 @@ static int ad5624r_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad5624r_id[] = {
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 2628810fdbb1..d26fb29b6b04 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -95,11 +95,9 @@ static int ad5686_spi_probe(struct spi_device *spi)
ad5686_spi_write, ad5686_spi_read);
}
-static int ad5686_spi_remove(struct spi_device *spi)
+static void ad5686_spi_remove(struct spi_device *spi)
{
ad5686_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5686_spi_id[] = {
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index e37e095e94fc..4cb8471db81e 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -394,7 +394,7 @@ disable_regulator_err:
return ret;
}
-static int ad5761_remove(struct spi_device *spi)
+static void ad5761_remove(struct spi_device *spi)
{
struct iio_dev *iio_dev = spi_get_drvdata(spi);
struct ad5761_state *st = iio_priv(iio_dev);
@@ -403,8 +403,6 @@ static int ad5761_remove(struct spi_device *spi)
if (!IS_ERR_OR_NULL(st->vref_reg))
regulator_disable(st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5761_id[] = {
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index ae089b9145cb..d235a8047ba0 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -332,7 +332,7 @@ error_disable_reg:
return ret;
}
-static int ad5764_remove(struct spi_device *spi)
+static void ad5764_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5764_state *st = iio_priv(indio_dev);
@@ -341,8 +341,6 @@ static int ad5764_remove(struct spi_device *spi)
if (st->chip_info->int_vref == 0)
regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5764_ids[] = {
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 7b4579d73d18..2b14914b4050 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -428,7 +428,7 @@ error_disable_reg_pos:
return ret;
}
-static int ad5791_remove(struct spi_device *spi)
+static void ad5791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5791_state *st = iio_priv(indio_dev);
@@ -439,8 +439,6 @@ static int ad5791_remove(struct spi_device *spi)
if (!IS_ERR(st->reg_vss))
regulator_disable(st->reg_vss);
-
- return 0;
}
static const struct spi_device_id ad5791_id[] = {
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 5ecfdad54dec..6be35c92d435 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -193,7 +193,7 @@ error_disable_vrefh_reg:
return ret;
}
-static int ad8801_remove(struct spi_device *spi)
+static void ad8801_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8801_state *state = iio_priv(indio_dev);
@@ -202,8 +202,6 @@ static int ad8801_remove(struct spi_device *spi)
if (state->vrefl_reg)
regulator_disable(state->vrefl_reg);
regulator_disable(state->vrefh_reg);
-
- return 0;
}
static const struct spi_device_id ad8801_ids[] = {
diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c
index f6ec9bf5815e..c76233c9bb72 100644
--- a/drivers/iio/dac/ltc1660.c
+++ b/drivers/iio/dac/ltc1660.c
@@ -206,15 +206,13 @@ error_disable_reg:
return ret;
}
-static int ltc1660_remove(struct spi_device *spi)
+static void ltc1660_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ltc1660_priv *priv = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(priv->vref_reg);
-
- return 0;
}
static const struct of_device_id ltc1660_dt_ids[] = {
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 53e4b887d372..aed46c80757e 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -372,7 +372,7 @@ static int ltc2632_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-static int ltc2632_remove(struct spi_device *spi)
+static void ltc2632_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ltc2632_state *st = iio_priv(indio_dev);
@@ -381,8 +381,6 @@ static int ltc2632_remove(struct spi_device *spi)
if (st->vref_reg)
regulator_disable(st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ltc2632_id[] = {
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 0ae414ee1716..cb9e60e71b91 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -172,7 +172,7 @@ error_disable_reg:
return ret;
}
-static int mcp4922_remove(struct spi_device *spi)
+static void mcp4922_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp4922_state *state;
@@ -180,8 +180,6 @@ static int mcp4922_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
state = iio_priv(indio_dev);
regulator_disable(state->vref_reg);
-
- return 0;
}
static const struct spi_device_id mcp4922_id[] = {
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 6beda2193683..4e1156e6deb2 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -313,7 +313,7 @@ err:
return ret;
}
-static int ti_dac_remove(struct spi_device *spi)
+static void ti_dac_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
@@ -321,8 +321,6 @@ static int ti_dac_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
mutex_destroy(&ti_dac->lock);
regulator_disable(ti_dac->vref);
-
- return 0;
}
static const struct of_device_id ti_dac_of_id[] = {
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 99f275829ec2..e10d17e60ed3 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -292,7 +292,7 @@ err:
return ret;
}
-static int ti_dac_remove(struct spi_device *spi)
+static void ti_dac_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
@@ -300,7 +300,6 @@ static int ti_dac_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
mutex_destroy(&ti_dac->lock);
regulator_disable(ti_dac->vref);
- return 0;
}
static const struct of_device_id ti_dac_of_id[] = {
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 3d9eba716b69..f3521330f6fb 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -589,7 +589,7 @@ error_disable_clk:
return ret;
}
-static int adf4350_remove(struct spi_device *spi)
+static void adf4350_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adf4350_state *st = iio_priv(indio_dev);
@@ -604,8 +604,6 @@ static int adf4350_remove(struct spi_device *spi)
if (!IS_ERR(reg))
regulator_disable(reg);
-
- return 0;
}
static const struct of_device_id adf4350_of_match[] = {
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 6cdeb50143af..3f3c478e9baa 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
vcm = regulator_get_voltage(st->reg);
- if (vcm >= 0 && vcm < 1800000)
+ if (vcm < 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
else if (vcm > 1800000 && vcm < 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 17b939a367ad..81a6d09788bd 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index 745962e1e423..fc2e453527b9 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -27,11 +27,9 @@ static int bmg160_spi_probe(struct spi_device *spi)
return bmg160_core_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int bmg160_spi_remove(struct spi_device *spi)
+static void bmg160_spi_remove(struct spi_device *spi)
{
bmg160_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmg160_spi_id[] = {
diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c
index 77ceebef4e34..c3ac169facf9 100644
--- a/drivers/iio/gyro/fxas21002c_spi.c
+++ b/drivers/iio/gyro/fxas21002c_spi.c
@@ -34,11 +34,9 @@ static int fxas21002c_spi_probe(struct spi_device *spi)
return fxas21002c_core_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int fxas21002c_spi_remove(struct spi_device *spi)
+static void fxas21002c_spi_remove(struct spi_device *spi)
{
fxas21002c_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id fxas21002c_spi_id[] = {
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 273f16dcaff8..856ec901b091 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -570,7 +570,7 @@ err_disable_reg:
return ret;
}
-static int afe4403_remove(struct spi_device *spi)
+static void afe4403_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct afe4403_data *afe = iio_priv(indio_dev);
@@ -586,8 +586,6 @@ static int afe4403_remove(struct spi_device *spi)
ret = regulator_disable(afe->regulator);
if (ret)
dev_warn(afe->dev, "Unable to disable regulator\n");
-
- return 0;
}
static const struct spi_device_id afe4403_ids[] = {
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index ed129321a14d..f9b4540db1f4 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct adis_data *adis16480_data;
+ irq_handler_t trigger_handler = NULL;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
st->clk_freq = st->chip_info->int_clk;
}
+ /* Only use our trigger handler if burst mode is supported */
+ if (adis16480_data->burst_len)
+ trigger_handler = adis16480_trigger_handler;
+
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
- adis16480_trigger_handler);
+ trigger_handler);
if (ret)
return ret;
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 1dabfd615dab..f89724481df9 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
ret = iio_device_register(data->acc_indio_dev);
if (ret < 0) {
dev_err(&client->dev, "Failed to register acc iio device\n");
- goto err_buffer_cleanup_mag;
+ goto err_pm_cleanup;
}
ret = iio_device_register(data->mag_indio_dev);
@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
err_iio_unregister_acc:
iio_device_unregister(data->acc_indio_dev);
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup_mag:
if (client->irq > 0)
iio_triggered_buffer_cleanup(data->mag_indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 727b4b6ac696..93f0c6bce502 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
+ /*
+ * we need to wait for sensor settling time before
+ * reading data in order to avoid corrupted samples
+ */
delay = 1000000000 / sensor->odr;
- usleep_range(delay, 2 * delay);
+ usleep_range(3 * delay, 4 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
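To put the longer settling wait in perspective (assuming, as in this driver's ODR tables, that sensor->odr is stored in milli-Hz):

/* worked example, 12.5 Hz is stored as 12500:
 *   delay = 1000000000 / 12500 = 80000 us
 *   old wait : 80000..160000 us  (1*delay .. 2*delay)
 *   new wait : 240000..320000 us (3*delay .. 4*delay)
 */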
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 94eb9f6cf128..208b5193c621 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg
}
if (copy_to_user(ival, &fd, sizeof(fd))) {
- put_unused_fd(fd);
- ret = -EFAULT;
- goto error_free_ib;
+ /*
+ * "Leak" the fd, as there's not much we can do about this
+ * anyway. 'fd' might have been closed already, as
+ * anon_inode_getfd() called fd_install() on it, which made
+ * it reachable by userland.
+ *
+ * Instead of allowing a malicious user to play tricks with
+ * us, rely on the process exit path to do any necessary
+ * cleanup, as in releasing the file, if still needed.
+ */
+ return -EFAULT;
}
return 0;
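The comment above explains why the error path can no longer call put_unused_fd() once anon_inode_getfd() has installed the fd. For comparison, a pattern that avoids the problem altogether (a sketch, not part of this patch; filp stands for a file created separately, e.g. with anon_inode_getfile()) reserves the fd, copies it to userspace, and only then publishes the file:

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
	return fd;

if (copy_to_user(ival, &fd, sizeof(fd))) {
	put_unused_fd(fd);	/* fd was never installed, so it can be released */
	fput(filp);		/* drop the file that was never published */
	return -EFAULT;
}

fd_install(fd, filp);		/* only now does userspace see the fd */
return 0;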
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index f96f53175349..3d4d21f979fa 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_disable_runtime_pm;
+ goto err_pm_cleanup;
}
dev_dbg(dev, "Registered device %s\n", name);
return 0;
-err_disable_runtime_pm:
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index c6ed3ea8460a..4c570412d65c 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -29,11 +29,9 @@ static int bmc150_magn_spi_probe(struct spi_device *spi)
return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int bmc150_magn_spi_remove(struct spi_device *spi)
+static void bmc150_magn_spi_remove(struct spi_device *spi)
{
bmc150_magn_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmc150_magn_spi_id[] = {
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 89cf59a62c28..a99dd9b33e95 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -74,11 +74,9 @@ static int hmc5843_spi_probe(struct spi_device *spi)
id->driver_data, id->name);
}
-static int hmc5843_spi_remove(struct spi_device *spi)
+static void hmc5843_spi_remove(struct spi_device *spi)
{
hmc5843_common_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id hmc5843_id[] = {
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 007c2bd324cb..42723c996c9f 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -112,7 +112,7 @@ static int max5487_spi_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-static int max5487_spi_remove(struct spi_device *spi)
+static void max5487_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
int ret;
@@ -123,8 +123,6 @@ static int max5487_spi_remove(struct spi_device *spi)
ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
if (ret)
dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n");
-
- return 0;
}
static const struct spi_device_id max5487_id[] = {
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 9fa2dcd71760..7ccd960ced5d 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -107,11 +107,9 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->driver_data);
}
-static int ms5611_spi_remove(struct spi_device *spi)
+static void ms5611_spi_remove(struct spi_device *spi)
{
ms5611_remove(spi_get_drvdata(spi));
-
- return 0;
}
static const struct of_device_id ms5611_spi_matches[] = {
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index 85201a4bae44..ee8ed77536ca 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -57,11 +57,9 @@ static int zpa2326_probe_spi(struct spi_device *spi)
spi->irq, ZPA2326_DEVICE_ID, regmap);
}
-static int zpa2326_remove_spi(struct spi_device *spi)
+static void zpa2326_remove_spi(struct spi_device *spi)
{
zpa2326_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id zpa2326_spi_ids[] = {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c903b74f46a4..35f0d5e7533d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work)
ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
if (ret) {
rdma_destroy_ah_attr(&ah_attr);
- return -EINVAL;
+ goto deref;
}
spin_lock_irq(&cm_id_priv->lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 27a00ce2e101..50c53409ceb6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -67,8 +67,8 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
-static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
- union ib_gid *mgid);
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type);
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
@@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
- if (ndev) {
+ if (ndev && !send_only) {
+ enum ib_gid_type gid_type;
union ib_gid mgid;
- cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
- &mgid);
-
- if (!send_only)
- cma_igmp_send(ndev, &mgid, false);
-
- dev_put(ndev);
+ gid_type = id_priv->cma_dev->default_gid_type
+ [id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
+ gid_type);
+ cma_igmp_send(ndev, &mgid, false);
}
+ dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
}
@@ -3368,22 +3370,30 @@ err:
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
- if (!src_addr || !src_addr->sa_family) {
- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- src_addr->sa_family = dst_addr->sa_family;
- if (IS_ENABLED(CONFIG_IPV6) &&
- dst_addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
- } else if (dst_addr->sa_family == AF_IB) {
- ((struct sockaddr_ib *) src_addr)->sib_pkey =
- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
- }
- }
- return rdma_bind_addr(id, src_addr);
+ struct sockaddr_storage zero_sock = {};
+
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr(id, src_addr);
+
+ /*
+ * When the src_addr is not specified, automatically supply an any addr
+ */
+ zero_sock.ss_family = dst_addr->sa_family;
+ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 =
+ (struct sockaddr_in6 *)&zero_sock;
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *)dst_addr;
+
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if =
+ dst_addr6->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+ }
+ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
}
/*
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5a3bd41b331c..4d98f931a13d 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2016 HGST, a Western Digital Company.
*/
+#include <linux/memremap.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2b72c4fa9550..9d6ac9dff39a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,6 +95,7 @@ struct ucma_context {
u64 uid;
struct list_head list;
+ struct list_head mc_list;
struct work_struct close_work;
};
@@ -105,6 +106,7 @@ struct ucma_multicast {
u64 uid;
u8 join_state;
+ struct list_head list;
struct sockaddr_storage addr;
};
@@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id);
init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
@@ -484,19 +487,19 @@ err1:
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
- struct ucma_multicast *mc;
- unsigned long index;
+ struct ucma_multicast *mc, *tmp;
- xa_for_each(&multicast_table, index, mc) {
- if (mc->ctx != ctx)
- continue;
+ xa_lock(&multicast_table);
+ list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
+ list_del(&mc->list);
/*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
- xa_erase(&multicast_table, index);
+ __xa_erase(&multicast_table, mc->id);
kfree(mc);
}
+ xa_unlock(&multicast_table);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
@@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ xa_lock(&multicast_table);
+ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err_free_mc;
}
+ list_add_tail(&mc->list, &ctx->mc_list);
+ xa_unlock(&multicast_table);
+
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
@@ -1500,8 +1507,11 @@ err_leave_multicast:
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err_xa_erase:
- xa_erase(&multicast_table, mc->id);
+ xa_lock(&multicast_table);
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
err_free_mc:
+ xa_unlock(&multicast_table);
kfree(mc);
err_put_ctx:
ucma_put_ctx(ctx);
@@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
- else
- __xa_erase(&multicast_table, mc->id);
- xa_unlock(&multicast_table);
if (IS_ERR(mc)) {
+ xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+ xa_unlock(&multicast_table);
+
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 909122934246..aec60d4888eb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -55,7 +55,7 @@ union hfi1_ipoib_flow {
*/
struct ipoib_txreq {
struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr;
int sdma_status;
int complete;
struct hfi1_ipoib_dev_priv *priv;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index e1a2b02bbd91..5d814afdf7f3 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
- return ret;
+ goto out_ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
- return ret;
+ goto out_ret;
}
return 0;
+out_ret:
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
@@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
-}
-
-static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
-{
- hfi1_ipoib_netdev_dtor(dev);
- free_netdev(dev);
+ dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
priv->port_num = port_num;
priv->netdev_ops = netdev->netdev_ops;
- netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
-
ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
rc = hfi1_ipoib_txreq_init(priv);
if (rc) {
dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
return rc;
}
rc = hfi1_ipoib_rxq_init(netdev);
if (rc) {
dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
+ hfi1_ipoib_txreq_deinit(priv);
return rc;
}
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index f4010890309f..d6bbdb8fcb50 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
dd_dev_warn(priv->dd,
"%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
__func__, tx->sdma_status,
- le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
tx->txq->sde->this_idx);
}
@@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
{
struct hfi1_devdata *dd = txp->dd;
struct sdma_txreq *txreq = &tx->txreq;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
u16 pkt_bytes =
sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
int ret;
@@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
struct rdma_ah_attr *ah_attr = txp->ah_attr;
@@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
if (likely(!ret)) {
tx_ok:
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
return NETDEV_TX_OK;
@@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
hfi1_ipoib_check_queue_depth(txq);
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
if (!netdev_xmit_more())
@@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
u32 tx_ring_size, tx_item_size;
- int i;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ int i, j;
/*
* Ring holds 1 less than tx_ring_size
@@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct ipoib_txreq *tx;
+ tx_ring = &txq->tx_ring;
iowait_init(&txq->wait,
0,
hfi1_ipoib_flush_txq,
@@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size, tx_item_size,
- GFP_KERNEL, priv->dd->node);
+ kvzalloc_node(array_size(tx_ring_size, tx_item_size),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
txq->tx_ring.max_items = tx_ring_size;
- txq->tx_ring.shift = ilog2(tx_ring_size);
+ txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ kzalloc_node(sizeof(*tx->sdma_hdr),
+ GFP_KERNEL, priv->dd->node);
netif_tx_napi_add(dev, &txq->napi,
hfi1_ipoib_poll_tx_ring,
@@ -746,7 +754,10 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(&txq->napi);
- kfree(txq->tx_ring.items);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
@@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
- int i;
+ int i, j;
for (i = 0; i < priv->netdev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(&txq->napi);
hfi1_ipoib_drain_tx_ring(txq);
- kfree(txq->tx_ring.items);
+ for (j = 0; j < tx_ring->max_items; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c3d97229988..93b1650eacfa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
if (!ew)
- break;
+ return;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 0a3b28142c05..41c272980f91 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
};
static const struct attribute_group port_diagc_group = {
- .name = "linkcontrol",
+ .name = "diag_counters",
.attrs = port_diagc_attributes,
};
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3305f2744bfa..ae50b56e8913 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -3073,6 +3073,8 @@ do_write:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
+ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
+ goto inv_err;
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 368959ae9a8c..df03d84c6868 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}
-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
-{
- return &qp->orq[qp->orq_put % qp->attrs.orq_size];
-}
-
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
- struct siw_sqe *orq_e = orq_get_tail(qp);
+ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 60116f20653c..875ea6f1b04a 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
spin_lock_irqsave(&qp->orq_lock, flags);
- rreq = orq_get_current(qp);
-
/* free current orq entry */
+ rreq = orq_get_current(qp);
WRITE_ONCE(rreq->flags, 0);
+ qp->orq_get++;
+
if (qp->tx_ctx.orq_fence) {
if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
rv = -EPROTO;
goto out;
}
- /* resume SQ processing */
+ /* resume SQ processing, if possible */
if (tx_waiting->sqe.opcode == SIW_OP_READ ||
tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- rreq = orq_get_tail(qp);
+
+ /* SQ processing was stopped because of a full ORQ */
+ rreq = orq_get_free(qp);
if (unlikely(!rreq)) {
pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
rv = -EPROTO;
@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
resume_tx = 1;
} else if (siw_orq_empty(qp)) {
+ /*
+ * SQ processing was stopped by fenced work request.
+ * Resume since all previous Read's are now completed.
+ */
qp->tx_ctx.orq_fence = 0;
resume_tx = 1;
- } else {
- pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
- qp_id(qp), qp->orq_get, qp->orq_put);
- rv = -EPROTO;
}
}
- qp->orq_get++;
out:
spin_unlock_irqrestore(&qp->orq_lock, flags);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index a3dd2cb6d5c9..54ef367b074a 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto err_atomic;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 7c3f98e57889..759b85f03331 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev)
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
kfree(clt);
}
@@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
return ERR_PTR(-ENOMEM);
}
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
clt->paths_num = paths_num;
@@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
+ device_initialize(&clt->dev);
- clt->dev.class = rtrs_clt_dev_class;
- clt->dev.release = rtrs_clt_dev_release;
err = dev_set_name(&clt->dev, "%s", sessname);
if (err)
- goto err;
+ goto err_put;
+
/*
* Suppress user space notification until
* sysfs files are created
*/
dev_set_uevent_suppress(&clt->dev, true);
- err = device_register(&clt->dev);
- if (err) {
- put_device(&clt->dev);
- goto err;
- }
+ err = device_add(&clt->dev);
+ if (err)
+ goto err_put;
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
err = -ENOMEM;
- goto err_dev;
+ goto err_del;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
- goto err_dev;
+ goto err_del;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
-err_dev:
- device_unregister(&clt->dev);
-err:
+err_del:
+ device_del(&clt->dev);
+err_put:
free_percpu(clt->pcpu_path);
- kfree(clt);
+ put_device(&clt->dev);
return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt_sess *clt)
{
- free_permits(clt);
free_percpu(clt->pcpu_path);
- mutex_destroy(&clt->paths_ev_mutex);
- mutex_destroy(&clt->paths_mutex);
- /* release callback will free clt in last put */
+
+ /*
+ * release callback will free clt and destroy mutexes in last put
+ */
device_unregister(&clt->dev);
}
@@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt)
rtrs_clt_destroy_path_files(clt_path, NULL);
kobject_put(&clt_path->kobj);
}
+ free_permits(clt);
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
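The alloc_clt()/free_clt() rework switches from device_register()/device_unregister() to the split device_initialize() + device_add() form, so every error path can end in put_device() and the release callback (which now also destroys the mutexes) frees the object exactly once. A hedged sketch of that driver-core pattern, independent of rtrs and using placeholder names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
	/* ... driver state ... */
};

static void my_obj_release(struct device *dev)
{
	struct my_obj *obj = container_of(dev, struct my_obj, dev);

	kfree(obj);			/* the last put_device() ends up here */
}

static struct my_obj *my_obj_create(struct device *parent, const char *name)
{
	struct my_obj *obj;
	int err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev.parent = parent;
	obj->dev.release = my_obj_release;
	device_initialize(&obj->dev);		/* refcount is live from here on */

	err = dev_set_name(&obj->dev, "%s", name);
	if (err)
		goto err_put;

	err = device_add(&obj->dev);		/* makes the device visible */
	if (err)
		goto err_put;

	return obj;

err_put:
	put_device(&obj->dev);			/* release callback frees obj */
	return ERR_PTR(err);
}

After a successful device_add(), teardown is device_del() followed by put_device(), which is exactly the err_del/err_put ladder the patch introduces.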
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e174e853f8a4..285b766e4e70 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&host->target_lock);
/*
- * Wait for tl_err and target port removal tasks.
+ * srp_queue_remove_work() queues a call to
+ * srp_remove_target(). The latter function cancels
+ * target->tl_err_work so waiting for the remove works to
+ * finish is sufficient.
*/
- flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ccaeb2426385..c3139bc2aa0d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
+ /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
+ if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+ }
+
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
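input_register_device() now strips BTN_RIGHT/BTN_MIDDLE from devices that declare INPUT_PROP_BUTTONPAD, since a buttonpad exposes a single physical button and right/middle clicks are synthesized elsewhere. For context, a hedged sketch of how a touchpad driver typically declares those capabilities before registering (names are illustrative):

#include <linux/input.h>

static int setup_buttonpad(struct input_dev *input)
{
	/* Single physical button under the pad surface. */
	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
	input_set_capability(input, EV_KEY, BTN_LEFT);

	/*
	 * Even if BTN_RIGHT/BTN_MIDDLE were set here by mistake, the core
	 * now clears them again in input_register_device().
	 */
	return input_register_device(input);
}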
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 0c607da9ee10..9417ee0b1eff 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on HAVE_CLK
+ depends on HAS_IOMEM && HAVE_CLK
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad on your Samsung mobile
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index eda1b23002b5..d1f5354d5ea2 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -1858,7 +1858,7 @@ static void applespi_drain_reads(struct applespi_data *applespi)
spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
}
-static int applespi_remove(struct spi_device *spi)
+static void applespi_remove(struct spi_device *spi)
{
struct applespi_data *applespi = spi_get_drvdata(spi);
@@ -1871,8 +1871,6 @@ static int applespi_remove(struct spi_device *spi)
applespi_drain_reads(applespi);
debugfs_remove_recursive(applespi->debugfs_root);
-
- return 0;
}
static void applespi_shutdown(struct spi_device *spi)
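This and the following SPI driver hunks (adxl34x-spi, ads7846, cyttsp4_spi, tsc2005) all adapt to an spi_device remove() callback that returns void rather than int; the returned value was never acted on. A minimal sketch of the updated driver shape, with placeholder names:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int mydev_probe(struct spi_device *spi)
{
	/* allocate state, register devices, ... */
	return 0;
}

static void mydev_remove(struct spi_device *spi)
{
	/* tear down in reverse order; nothing to return anymore */
}

static struct spi_driver mydev_driver = {
	.driver = {
		.name = "mydev",
	},
	.probe	= mydev_probe,
	.remove	= mydev_remove,
};
module_spi_driver(mydev_driver);

MODULE_LICENSE("GPL");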
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 6e51c9bc619f..91e44d4c66f7 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -87,13 +87,11 @@ static int adxl34x_spi_probe(struct spi_device *spi)
return 0;
}
-static int adxl34x_spi_remove(struct spi_device *spi)
+static void adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = spi_get_drvdata(spi);
adxl34x_remove(ac);
-
- return 0;
}
static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 47af62c12267..e1758d5ffe42 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
return 0;
}
-static int elan_enable_power(struct elan_tp_data *data)
+static int elan_set_power(struct elan_tp_data *data, bool on)
{
int repeat = ETP_RETRY_COUNT;
int error;
- error = regulator_enable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to enable regulator: %d\n", error);
- return error;
- }
-
do {
- error = data->ops->power_control(data->client, true);
+ error = data->ops->power_control(data->client, on);
if (error >= 0)
return 0;
msleep(30);
} while (--repeat > 0);
- dev_err(&data->client->dev, "failed to enable power: %d\n", error);
- return error;
-}
-
-static int elan_disable_power(struct elan_tp_data *data)
-{
- int repeat = ETP_RETRY_COUNT;
- int error;
-
- do {
- error = data->ops->power_control(data->client, false);
- if (!error) {
- error = regulator_disable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to disable regulator: %d\n",
- error);
- /* Attempt to power the chip back up */
- data->ops->power_control(data->client, true);
- break;
- }
-
- return 0;
- }
-
- msleep(30);
- } while (--repeat > 0);
-
- dev_err(&data->client->dev, "failed to disable power: %d\n", error);
+ dev_err(&data->client->dev, "failed to set power %s: %d\n",
+ on ? "on" : "off", error);
return error;
}
@@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev)
/* Enable wake from IRQ */
data->irq_wake = (enable_irq_wake(client->irq) == 0);
} else {
- ret = elan_disable_power(data);
+ ret = elan_set_power(data, false);
+ if (ret)
+ goto err;
+
+ ret = regulator_disable(data->vcc);
+ if (ret) {
+ dev_err(dev, "error %d disabling regulator\n", ret);
+ /* Attempt to power the chip back up */
+ elan_set_power(data, true);
+ }
}
+err:
mutex_unlock(&data->sysfs_mutex);
return ret;
}
@@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev)
struct elan_tp_data *data = i2c_get_clientdata(client);
int error;
- if (device_may_wakeup(dev) && data->irq_wake) {
+ if (!device_may_wakeup(dev)) {
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(dev, "error %d enabling regulator\n", error);
+ goto err;
+ }
+ } else if (data->irq_wake) {
disable_irq_wake(client->irq);
data->irq_wake = false;
}
- error = elan_enable_power(data);
+ error = elan_set_power(data, true);
if (error) {
dev_err(dev, "power up when resuming failed: %d\n", error);
goto err;
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index a472489ccbad..164f6c757f6b 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
"Marking SMBus companion %s as gone\n",
dev_name(&smbdev->client->dev));
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &smbdev->psmouse->ps2dev.serio->dev);
serio_rescan(smbdev->psmouse->ps2dev.serio);
} else {
list_del(&smbdev->node);
@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
kfree(smbdev);
} else {
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev);
psmouse_dbg(smbdev->psmouse,
"posting removal request for SMBus companion %s\n",
dev_name(&smbdev->client->dev));
@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,
if (smbdev->client) {
/* We have our companion device */
+ if (!device_link_add(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev,
+ DL_FLAG_STATELESS))
+ psmouse_warn(psmouse,
+ "failed to set up link with iSMBus companion %s\n",
+ dev_name(&smbdev->client->dev));
return 0;
}
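psmouse-smbus now records a stateless device link between the SMBus companion and the PS/2 serio device, and removes it again when either side goes away. A sketch of the underlying driver-core calls, with the two devices named hypothetically:

#include <linux/device.h>

/* Order @consumer after @supplier for PM and shutdown purposes. */
static int link_companion(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -EINVAL;	/* NULL on bad arguments or allocation failure */

	return 0;
}

static void unlink_companion(struct device *consumer, struct device *supplier)
{
	device_link_remove(consumer, supplier);
}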
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index fcb1b646436a..1581f6ef0927 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1787,15 +1787,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
- /* Verify that a device really has an endpoint */
- if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ err = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &endpoint, NULL);
+ if (err) {
dev_err(&intf->dev,
- "interface has %d endpoints, but must have minimum 1\n",
- intf->cur_altsetting->desc.bNumEndpoints);
- err = -EINVAL;
+ "interface has no int in endpoints, but must have minimum 1\n");
goto fail3;
}
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
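The aiptek probe path replaces the hand-rolled bNumEndpoints check with usb_find_common_endpoints(), which both verifies that an interrupt-in endpoint exists and hands back its descriptor. A hedged sketch of that helper in a probe routine (function name is invented):

#include <linux/usb.h>

static int find_int_in_ep(struct usb_interface *intf,
			  struct usb_endpoint_descriptor **int_in)
{
	int err;

	/* Only interested in an interrupt-in endpoint; ignore the rest. */
	err = usb_find_common_endpoints(intf->cur_altsetting,
					NULL, NULL, int_in, NULL);
	if (err) {
		dev_err(&intf->dev, "no interrupt-in endpoint found\n");
		return err;
	}

	dev_dbg(&intf->dev, "interrupt-in endpoint: 0x%02x\n",
		(*int_in)->bEndpointAddress);
	return 0;
}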
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a25a77dd9a32..bed68a68f330 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1411,13 +1411,11 @@ static int ads7846_probe(struct spi_device *spi)
return 0;
}
-static int ads7846_remove(struct spi_device *spi)
+static void ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = spi_get_drvdata(spi);
ads7846_stop(ts);
-
- return 0;
}
static struct spi_driver ads7846_driver = {
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
index 2aec41eb76b7..5d7db84f2749 100644
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ b/drivers/input/touchscreen/cyttsp4_spi.c
@@ -164,12 +164,10 @@ static int cyttsp4_spi_probe(struct spi_device *spi)
return PTR_ERR_OR_ZERO(ts);
}
-static int cyttsp4_spi_remove(struct spi_device *spi)
+static void cyttsp4_spi_remove(struct spi_device *spi)
{
struct cyttsp4 *ts = spi_get_drvdata(spi);
cyttsp4_remove(ts);
-
- return 0;
}
static struct spi_driver cyttsp4_spi_driver = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a3bfc7a41679..752e8ba4fecb 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts)
}
#ifdef ACPI_GPIO_SUPPORT
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-static const struct x86_cpu_id baytrail_cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
- {}
-};
-
-static inline bool is_byt(void)
-{
- const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
-
- return !!id;
-}
-
static const struct acpi_gpio_params first_gpio = { 0, 0, false };
static const struct acpi_gpio_params second_gpio = { 1, 0, false };
@@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
const struct acpi_gpio_mapping *gpio_mapping = NULL;
struct device *dev = &ts->client->dev;
LIST_HEAD(resources);
- int ret;
+ int irq, ret;
ts->gpio_count = 0;
ts->gpio_int_idx = -1;
@@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
acpi_dev_free_resource_list(&resources);
+ /*
+ * CHT devices should have a GpioInt + a regular GPIO ACPI resource.
+ * Some CHT devices have a bug where there is also a bogus Interrupt
+ * resource (copied over from a previous BYT-based generation), and
+ * i2c-core-acpi will pick that non-working Interrupt resource; fix this up.
+ */
+ if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) {
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq > 0 && irq != ts->client->irq) {
+ dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq);
+ ts->client->irq = irq;
+ }
+ }
+
if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
@@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
gpio_mapping = acpi_goodix_reset_only_gpios;
- } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+ } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_last_gpios;
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index a2f55920b9b2..555dfe98b3c4 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -64,11 +64,9 @@ static int tsc2005_probe(struct spi_device *spi)
tsc2005_cmd);
}
-static int tsc2005_remove(struct spi_device *spi)
+static void tsc2005_remove(struct spi_device *spi)
{
tsc200x_remove(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 78d2ee99f37a..1b58611c8084 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm)
* extensions)
*/
wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
+ if (!wm->touch_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = wm->dev;
wm->touch_dev->dev.platform_data = pdata;
@@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm)
return 0;
touch_reg_err:
platform_device_put(wm->touch_dev);
-touch_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
return ret;
}
@@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
}
static int _wm97xx_probe(struct wm97xx *wm)
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 7c82c4f5fa6b..8bd03278ad9a 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -135,7 +135,7 @@ struct point_coord {
struct touch_event {
__le16 status;
- u8 finger_cnt;
+ u8 finger_mask;
u8 time_stamp;
struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM];
};
@@ -322,11 +322,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541)
static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
const struct point_coord *p)
{
+ u16 x, y;
+
+ if (unlikely(!(p->sub_status &
+ (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) {
+ dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n",
+ p->sub_status);
+ return;
+ }
+
+ x = le16_to_cpu(p->x);
+ y = le16_to_cpu(p->y);
+
input_mt_slot(bt541->input_dev, slot);
- input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true);
- touchscreen_report_pos(bt541->input_dev, &bt541->prop,
- le16_to_cpu(p->x), le16_to_cpu(p->y), true);
- input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width);
+ if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER,
+ !(p->sub_status & SUB_BIT_UP))) {
+ touchscreen_report_pos(bt541->input_dev,
+ &bt541->prop, x, y, true);
+ input_report_abs(bt541->input_dev,
+ ABS_MT_TOUCH_MAJOR, p->width);
+ dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n",
+ slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move",
+ x, y);
+ } else {
+ dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n",
+ slot, x, y);
+ }
}
static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
@@ -334,6 +355,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
struct bt541_ts_data *bt541 = bt541_handler;
struct i2c_client *client = bt541->client;
struct touch_event touch_event;
+ unsigned long finger_mask;
int error;
int i;
@@ -346,10 +368,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
goto out;
}
- for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++)
- if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST)
- zinitix_report_finger(bt541, i,
- &touch_event.point_coord[i]);
+ finger_mask = touch_event.finger_mask;
+ for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) {
+ const struct point_coord *p = &touch_event.point_coord[i];
+
+ /* Only process contacts that are actually reported */
+ if (p->sub_status & SUB_BIT_EXIST)
+ zinitix_report_finger(bt541, i, p);
+ }
input_mt_sync_frame(bt541->input_dev);
input_sync(bt541->input_dev);
@@ -571,8 +597,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt402" },
+ { .compatible = "zinitix,bt403" },
+ { .compatible = "zinitix,bt404" },
+ { .compatible = "zinitix,bt412" },
+ { .compatible = "zinitix,bt413" },
+ { .compatible = "zinitix,bt431" },
+ { .compatible = "zinitix,bt432" },
+ { .compatible = "zinitix,bt531" },
{ .compatible = "zinitix,bt532" },
+ { .compatible = "zinitix,bt538" },
{ .compatible = "zinitix,bt541" },
+ { .compatible = "zinitix,bt548" },
+ { .compatible = "zinitix,bt554" },
+ { .compatible = "zinitix,at100" },
{ }
};
MODULE_DEVICE_TABLE(of, zinitix_of_match);
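The zinitix interrupt handler now treats the byte after the status word as a bitmask of reported fingers, walks it with for_each_set_bit(), and reports lift-off through the slot state instead of assuming every contact is down. A simplified sketch of that iteration pattern; the contact structure and field names are illustrative only:

#include <linux/bitops.h>
#include <linux/input.h>
#include <linux/input/mt.h>

#define MAX_FINGERS 10

struct contact {
	bool present;	/* still touching, as opposed to a lift-off event */
	u16 x, y;
};

static void report_contacts(struct input_dev *input, u8 mask,
			    const struct contact *c)
{
	unsigned long finger_mask = mask;
	int i;

	for_each_set_bit(i, &finger_mask, MAX_FINGERS) {
		input_mt_slot(input, i);
		if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
					       c[i].present)) {
			/* Active contact: report its position. */
			input_report_abs(input, ABS_MT_POSITION_X, c[i].x);
			input_report_abs(input, ABS_MT_POSITION_Y, c[i].y);
		}
		/* A false return means the slot was just released. */
	}

	input_mt_sync_frame(input);
	input_sync(input);
}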
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3eb68fa1b8cc..c79a0df090c0 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,8 +144,8 @@ config IOMMU_DMA
select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
-# Shared Virtual Addressing library
-config IOMMU_SVA_LIB
+# Shared Virtual Addressing
+config IOMMU_SVA
bool
select IOASID
@@ -379,7 +379,7 @@ config ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bc7f730edbb0..44475a9b3eea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,6 +27,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 416815a525d6..bb95edf74415 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -14,6 +14,7 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ffc89c4fb120..47108ed44fbb 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -110,6 +110,7 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0)
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index dc338acf3338..7bfe37e52e21 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
+#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -657,6 +658,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
@@ -834,6 +845,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
+ udelay(10);
}
if (WARN_ON(i >= LOOP_TIMEOUT))
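iommu_ga_log_enable() previously spun on the status register with no delay; the hunk adds a udelay(10) per iteration, and init.c now pulls in linux/iopoll.h. For new code the same bounded poll is usually written with the iopoll helpers; a hedged sketch with a hypothetical register offset and bit:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_STATUS_REG	0x2020
#define MY_STATUS_RUN	BIT(0)

/* Wait up to 100 ms for the RUN bit, checking every 10 us. */
static int wait_for_log_running(void __iomem *mmio)
{
	u32 status;

	return readl_poll_timeout(mmio + MY_STATUS_REG, status,
				  status & MY_STATUS_RUN, 10, 100000);
}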
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index b1bf4125b0f7..6608d1717574 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
dom = container_of(pgtable, struct protection_domain, iop);
- /* Update data structure */
- amd_iommu_domain_clr_pt_root(dom);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(dom);
-
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
put_pages_list(&freelist);
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 461f1844ed1f..a18b549951bb 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_INT_MASK | \
+ (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+ MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
while (status & AMD_IOMMU_INT_MASK) {
- /* Enable EVT and PPR and GA interrupts again */
+ /* Enable interrupt sources again */
writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
}
#endif
+ if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+ pr_info_ratelimited("IOMMU event log overflow\n");
+ amd_iommu_restart_event_logging(iommu);
+ }
+
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index a737ba5f727e..22ddd05bbdcd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -340,14 +340,12 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
ret = PTR_ERR(bond->smmu_mn);
- goto err_free_pasid;
+ goto err_free_bond;
}
list_add(&bond->list, &master->bonds);
return &bond->sva;
-err_free_pasid:
- iommu_sva_free_pasid(mm);
err_free_bond:
kfree(bond);
return ERR_PTR(ret);
@@ -377,7 +375,6 @@ void arm_smmu_sva_unbind(struct iommu_sva *handle)
if (refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
- iommu_sva_free_pasid(bond->mm);
kfree(bond);
}
mutex_unlock(&sva_lock);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 247d0f2d5fdf..39a06d245f12 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -52,7 +52,7 @@ config INTEL_IOMMU_SVM
select PCI_PRI
select MMU_NOTIFIER
select IOASID
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 92fea3fbbb11..1ce1741a7fa4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
- if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
@@ -4781,7 +4781,7 @@ attach_failed:
link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
return ret;
}
@@ -4811,7 +4811,7 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);
if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index f912fe45bea2..a67319597884 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
fn, &intel_ir_domain_ops,
iommu);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
- goto out_free_bitmap;
+ goto out_free_fwnode;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
@@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
- goto out_free_bitmap;
+ goto out_free_ir_domain;
}
}
@@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
+out_free_ir_domain:
+ if (iommu->ir_msi_domain)
+ irq_domain_remove(iommu->ir_msi_domain);
+ iommu->ir_msi_domain = NULL;
+ irq_domain_remove(iommu->ir_domain);
+ iommu->ir_domain = NULL;
+out_free_fwnode:
+ irq_domain_free_fwnode(fn);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
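The intel_setup_irq_remapping() fix adds dedicated labels so each failure point unwinds exactly what was set up before it, in reverse order: remove the IR domains, then free the fwnode, then the bitmap, then the pages. The same ladder in a generic, self-contained form with invented resources:

#include <linux/errno.h>
#include <linux/slab.h>

struct ctx {
	void *a, *b, *c;
};

static int ctx_setup(struct ctx *ctx)
{
	ctx->a = kzalloc(64, GFP_KERNEL);
	if (!ctx->a)
		return -ENOMEM;

	ctx->b = kzalloc(64, GFP_KERNEL);
	if (!ctx->b)
		goto out_free_a;

	ctx->c = kzalloc(64, GFP_KERNEL);
	if (!ctx->c)
		goto out_free_b;

	return 0;

	/* Unwind in reverse order of acquisition. */
out_free_b:
	kfree(ctx->b);
	ctx->b = NULL;
out_free_a:
	kfree(ctx->a);
	ctx->a = NULL;
	return -ENOMEM;
}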
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 5b5d69b04fcc..51ac2096b3da 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -514,11 +514,6 @@ static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}
-static void intel_svm_free_pasid(struct mm_struct *mm)
-{
- iommu_sva_free_pasid(mm);
-}
-
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct device *dev,
struct mm_struct *mm,
@@ -662,8 +657,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
- /* Drop a PASID reference and free it if no reference. */
- intel_svm_free_pasid(mm);
}
out:
return ret;
@@ -1047,8 +1040,6 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void
}
sva = intel_svm_bind_mm(iommu, dev, mm, flags);
- if (IS_ERR_OR_NULL(sva))
- intel_svm_free_pasid(mm);
mutex_unlock(&pasid_mutex);
return sva;
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 50ee27bbd04e..a786c034907c 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -2,7 +2,7 @@
/*
* I/O Address Space ID allocator. There is one global IOASID space, split into
* subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc and ioasid_put.
+ * free IOASIDs with ioasid_alloc() and ioasid_free().
*/
#include <linux/ioasid.h>
#include <linux/module.h>
@@ -15,7 +15,6 @@ struct ioasid_data {
struct ioasid_set *set;
void *private;
struct rcu_head rcu;
- refcount_t refs;
};
/*
@@ -315,7 +314,6 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
data->set = set;
data->private = private;
- refcount_set(&data->refs, 1);
/*
* Custom allocator needs allocator data to perform platform specific
@@ -348,34 +346,11 @@ exit_free:
EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
- * ioasid_get - obtain a reference to the IOASID
- */
-void ioasid_get(ioasid_t ioasid)
-{
- struct ioasid_data *ioasid_data;
-
- spin_lock(&ioasid_allocator_lock);
- ioasid_data = xa_load(&active_allocator->xa, ioasid);
- if (ioasid_data)
- refcount_inc(&ioasid_data->refs);
- else
- WARN_ON(1);
- spin_unlock(&ioasid_allocator_lock);
-}
-EXPORT_SYMBOL_GPL(ioasid_get);
-
-/**
- * ioasid_put - Release a reference to an ioasid
+ * ioasid_free - Free an ioasid
* @ioasid: the ID to remove
- *
- * Put a reference to the IOASID, free it when the number of references drops to
- * zero.
- *
- * Return: %true if the IOASID was freed, %false otherwise.
*/
-bool ioasid_put(ioasid_t ioasid)
+void ioasid_free(ioasid_t ioasid)
{
- bool free = false;
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
@@ -385,10 +360,6 @@ bool ioasid_put(ioasid_t ioasid)
goto exit_unlock;
}
- free = refcount_dec_and_test(&ioasid_data->refs);
- if (!free)
- goto exit_unlock;
-
active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
/* Custom allocator needs additional steps to free the xa element */
if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
@@ -398,9 +369,8 @@ bool ioasid_put(ioasid_t ioasid)
exit_unlock:
spin_unlock(&ioasid_allocator_lock);
- return free;
}
-EXPORT_SYMBOL_GPL(ioasid_put);
+EXPORT_SYMBOL_GPL(ioasid_free);
/**
* ioasid_find - Find IOASID data
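With reference counting removed from the IOASID core, callers now pair one ioasid_alloc() with one ioasid_free(); any lifetime tracking moves to the user, as the SVA code does by keeping the PASID for the lifetime of the mm. A hedged usage sketch with an invented set name:

#include <linux/ioasid.h>

static DECLARE_IOASID_SET(my_ioasid_set);

static ioasid_t my_pasid_get(void *priv, ioasid_t min, ioasid_t max)
{
	/* Allocates one ID in the requested range; INVALID_IOASID on failure. */
	return ioasid_alloc(&my_ioasid_set, min, max, priv);
}

static void my_pasid_put(ioasid_t pasid)
{
	/* No reference counting anymore: this frees the ID immediately. */
	ioasid_free(pasid);
}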
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index bd41405d34e9..106506143896 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -18,8 +18,7 @@ static DECLARE_IOASID_SET(iommu_sva_pasid);
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
- * available in mm->pasid, and must be released with iommu_sva_free_pasid().
- * @min must be greater than 0, because 0 indicates an unused mm->pasid.
+ * available in mm->pasid and will be available for the lifetime of the mm.
*
* Returns 0 on success and < 0 on error.
*/
@@ -33,38 +32,24 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
- if (mm->pasid) {
- if (mm->pasid >= min && mm->pasid <= max)
- ioasid_get(mm->pasid);
- else
+ /* Is a PASID already associated with this mm? */
+ if (pasid_valid(mm->pasid)) {
+ if (mm->pasid < min || mm->pasid >= max)
ret = -EOVERFLOW;
- } else {
- pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
- if (pasid == INVALID_IOASID)
- ret = -ENOMEM;
- else
- mm->pasid = pasid;
+ goto out;
}
+
+ pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
+ if (!pasid_valid(pasid))
+ ret = -ENOMEM;
+ else
+ mm_pasid_set(mm, pasid);
+out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
-/**
- * iommu_sva_free_pasid - Release the mm's PASID
- * @mm: the mm
- *
- * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
- */
-void iommu_sva_free_pasid(struct mm_struct *mm)
-{
- mutex_lock(&iommu_sva_lock);
- if (ioasid_put(mm->pasid))
- mm->pasid = 0;
- mutex_unlock(&iommu_sva_lock);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
-
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index 031155010ca8..8909ea1094e3 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -9,7 +9,6 @@
#include <linux/mm_types.h>
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
-void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
/* I/O Page fault */
@@ -17,7 +16,7 @@ struct device;
struct iommu_fault;
struct iopf_queue;
-#ifdef CONFIG_IOMMU_SVA_LIB
+#ifdef CONFIG_IOMMU_SVA
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
@@ -28,7 +27,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
-#else /* CONFIG_IOMMU_SVA_LIB */
+#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
return -ENODEV;
@@ -64,5 +63,5 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
return -ENODEV;
}
-#endif /* CONFIG_IOMMU_SVA_LIB */
+#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8b86406b7162..107dcf5938d6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
static void dev_iommu_free(struct device *dev)
{
- iommu_fwspec_free(dev);
- kfree(dev->iommu);
+ struct dev_iommu *param = dev->iommu;
+
dev->iommu = NULL;
+ if (param->fwspec) {
+ fwnode_handle_put(param->fwspec->iommu_fwnode);
+ kfree(param->fwspec);
+ }
+ kfree(param);
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
return ret;
}
-/**
- * iommu_group_for_each_dev - iterate over each device in the group
- * @group: the group
- * @data: caller opaque data to be passed to callback function
- * @fn: caller supplied callback function
- *
- * This function is called by group users to iterate over group devices.
- * Callers should hold a reference count to the group during callback.
- * The group->mutex is held across callbacks, which will block calls to
- * iommu_group_add/remove_device.
- */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
return ret;
}
-
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 91749654fd49..980e4af3f06b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}
/**
- * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
* @dev: iommu device
*
* This function performs the necessary checks to determine if the IOMMU
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e900e3c46903..2561ce8a2ce8 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
- if (!mc)
+ if (!mc) {
+ put_device(&pdev->dev);
return NULL;
+ }
return mc->smmu;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 7038957f4a77..680d2fcf2686 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -430,6 +430,14 @@ config QCOM_PDC
Power Domain Controller driver to manage and configure wakeup
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+config QCOM_MPM
+ tristate "QCOM MPM"
+ depends on ARCH_QCOM
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ MSM Power Manager driver to manage and configure wakeup
+ IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+
config CSKY_MPINTC
bool
depends on CSKY
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c1f611cbfbf8..1f8990f812f1 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
+obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2543ef65825b..12dd48727a15 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -24,7 +24,7 @@
* - Default "this CPU" register view and explicit per-CPU views
*
* In addition, this driver also handles FIQs, as these are routed to the same
- * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
+ * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
* performance counters (TODO).
*
* Implementation notes:
@@ -52,9 +52,12 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+#include <asm/apple_m1_pmu.h>
+#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
@@ -62,20 +65,22 @@
#include <dt-bindings/interrupt-controller/apple-aic.h>
/*
- * AIC registers (MMIO)
+ * AIC v1 registers (MMIO)
*/
#define AIC_INFO 0x0004
-#define AIC_INFO_NR_HW GENMASK(15, 0)
+#define AIC_INFO_NR_IRQ GENMASK(15, 0)
#define AIC_CONFIG 0x0010
#define AIC_WHOAMI 0x2000
#define AIC_EVENT 0x2004
-#define AIC_EVENT_TYPE GENMASK(31, 16)
+#define AIC_EVENT_DIE GENMASK(31, 24)
+#define AIC_EVENT_TYPE GENMASK(23, 16)
#define AIC_EVENT_NUM GENMASK(15, 0)
-#define AIC_EVENT_TYPE_HW 1
+#define AIC_EVENT_TYPE_FIQ 0 /* Software use */
+#define AIC_EVENT_TYPE_IRQ 1
#define AIC_EVENT_TYPE_IPI 4
#define AIC_EVENT_IPI_OTHER 1
#define AIC_EVENT_IPI_SELF 2
@@ -91,34 +96,73 @@
#define AIC_IPI_SELF BIT(31)
#define AIC_TARGET_CPU 0x3000
-#define AIC_SW_SET 0x4000
-#define AIC_SW_CLR 0x4080
-#define AIC_MASK_SET 0x4100
-#define AIC_MASK_CLR 0x4180
#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
+#define AIC_MAX_IRQ 0x400
+
+/*
+ * AIC v2 registers (MMIO)
+ */
+
+#define AIC2_VERSION 0x0000
+#define AIC2_VERSION_VER GENMASK(7, 0)
+
+#define AIC2_INFO1 0x0004
+#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
+#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)
+
+#define AIC2_INFO2 0x0008
+
+#define AIC2_INFO3 0x000c
+#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
+#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)
+
+#define AIC2_RESET 0x0010
+#define AIC2_RESET_RESET BIT(0)
+
+#define AIC2_CONFIG 0x0014
+#define AIC2_CONFIG_ENABLE BIT(0)
+#define AIC2_CONFIG_PREFER_PCPU BIT(28)
+
+#define AIC2_TIMEOUT 0x0028
+#define AIC2_CLUSTER_PRIO 0x0030
+#define AIC2_DELAY_GROUPS 0x0100
+
+#define AIC2_IRQ_CFG 0x2000
+
+/*
+ * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
+ *
+ * Repeat for each die:
+ * IRQ_CFG: u32 * MAX_IRQS
+ * SW_SET: u32 * (MAX_IRQS / 32)
+ * SW_CLR: u32 * (MAX_IRQS / 32)
+ * MASK_SET: u32 * (MAX_IRQS / 32)
+ * MASK_CLR: u32 * (MAX_IRQS / 32)
+ * HW_STATE: u32 * (MAX_IRQS / 32)
+ *
+ * This is followed by a set of event registers, each 16K page aligned.
+ * The first one is the AP event register we will use. Unfortunately,
+ * the actual implemented die count is not specified anywhere in the
+ * capability registers, so we have to explicitly specify the event
+ * register as a second reg entry in the device tree to remain
+ * forward-compatible.
+ */
+
+#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
+#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)
+
#define MASK_REG(x) (4 * ((x) >> 5))
#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
/*
* IMP-DEF sysregs that control FIQ sources
- * Note: sysreg-based IPIs are not supported yet.
*/
-/* Core PMC control register */
-#define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0)
-#define PMCR0_IMODE GENMASK(10, 8)
-#define PMCR0_IMODE_OFF 0
-#define PMCR0_IMODE_PMI 1
-#define PMCR0_IMODE_AIC 2
-#define PMCR0_IMODE_HALT 3
-#define PMCR0_IMODE_FIQ 4
-#define PMCR0_IACT BIT(11)
-
/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
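The AIC2 layout comment above is what the driver later turns into die_stride: starting at IRQ_CFG, each die occupies one u32 per possible IRQ plus five bitmap arrays of max_irq/32 words each (SW_SET, SW_CLR, MASK_SET, MASK_CLR, HW_STATE). A small standalone sketch of that arithmetic, assuming the same layout; the example max_irq value is hypothetical:

#include <stdio.h>

/* Per-die register block size, in bytes, for the layout described above. */
static unsigned int aic2_die_stride(unsigned int max_irq)
{
	unsigned int words_per_bitmap = max_irq / 32;

	return 4 * max_irq		   /* IRQ_CFG: one u32 per IRQ */
	     + 4 * words_per_bitmap * 5;   /* SW_SET, SW_CLR, MASK_SET, MASK_CLR, HW_STATE */
}

int main(void)
{
	/* A hypothetical max_irq of 0x1000 gives a 0x4a00-byte stride. */
	printf("stride = %#x\n", aic2_die_stride(0x1000));
	return 0;
}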
@@ -155,7 +199,18 @@
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT BIT(0)
-#define AIC_NR_FIQ 4
+/* MPIDR fields */
+#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
+#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
+
+#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
+ FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, irq))
+#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, x))
+#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
+#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
+#define AIC_NR_FIQ 6
#define AIC_NR_SWIPI 32
/*
@@ -173,12 +228,81 @@
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
+DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+
+struct aic_info {
+ int version;
+
+ /* Register offsets */
+ u32 event;
+ u32 target_cpu;
+ u32 irq_cfg;
+ u32 sw_set;
+ u32 sw_clr;
+ u32 mask_set;
+ u32 mask_clr;
+
+ u32 die_stride;
+
+ /* Features */
+ bool fast_ipi;
+};
+
+static const struct aic_info aic1_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+};
+
+static const struct aic_info aic1_fipi_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+
+ .fast_ipi = true,
+};
+
+static const struct aic_info aic2_info = {
+ .version = 2,
+
+ .irq_cfg = AIC2_IRQ_CFG,
+
+ .fast_ipi = true,
+};
+
+static const struct of_device_id aic_info_match[] = {
+ {
+ .compatible = "apple,t8103-aic",
+ .data = &aic1_fipi_info,
+ },
+ {
+ .compatible = "apple,aic",
+ .data = &aic1_info,
+ },
+ {
+ .compatible = "apple,aic2",
+ .data = &aic2_info,
+ },
+ {}
+};
+
struct aic_irq_chip {
void __iomem *base;
+ void __iomem *event;
struct irq_domain *hw_domain;
struct irq_domain *ipi_domain;
- int nr_hw;
- int ipi_hwirq;
+ struct {
+ cpumask_t aff;
+ } *fiq_aff[AIC_NR_FIQ];
+
+ int nr_irq;
+ int max_irq;
+ int nr_die;
+ int max_die;
+
+ struct aic_info info;
};
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
@@ -206,18 +330,24 @@ static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
static void aic_irq_mask(struct irq_data *d)
{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
- MASK_BIT(irqd_to_hwirq(d)));
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}
static void aic_irq_unmask(struct irq_data *d)
{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(d->hwirq),
- MASK_BIT(irqd_to_hwirq(d)));
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}
static void aic_irq_eoi(struct irq_data *d)
@@ -240,12 +370,12 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
* We cannot use a relaxed read here, as reads from DMA buffers
* need to be ordered after the IRQ fires.
*/
- event = readl(ic->base + AIC_EVENT);
+ event = readl(ic->event + ic->info.event);
type = FIELD_GET(AIC_EVENT_TYPE, event);
irq = FIELD_GET(AIC_EVENT_NUM, event);
- if (type == AIC_EVENT_TYPE_HW)
- generic_handle_domain_irq(aic_irqc->hw_domain, irq);
+ if (type == AIC_EVENT_TYPE_IRQ)
+ generic_handle_domain_irq(aic_irqc->hw_domain, event);
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
aic_handle_ipi(regs);
else if (event != 0)
@@ -272,12 +402,14 @@ static int aic_irq_set_affinity(struct irq_data *d,
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
int cpu;
+ BUG_ON(!ic->info.target_cpu);
+
if (force)
cpu = cpumask_first(mask_val);
else
cpu = cpumask_any_and(mask_val, cpu_online_mask);
- aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
+ aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
@@ -301,15 +433,21 @@ static struct irq_chip aic_chip = {
.irq_set_type = aic_irq_set_type,
};
+static struct irq_chip aic2_chip = {
+ .name = "AIC2",
+ .irq_mask = aic_irq_mask,
+ .irq_unmask = aic_irq_unmask,
+ .irq_eoi = aic_irq_eoi,
+ .irq_set_type = aic_irq_set_type,
+};
+
/*
* FIQ irqchip
*/
static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-
- return irqd_to_hwirq(d) - ic->nr_hw;
+ return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}
static void aic_fiq_set_mask(struct irq_data *d)
@@ -387,17 +525,21 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
*/
if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
- pr_err_ratelimited("Fast IPI fired. Acking.\n");
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_handle_ipi(regs);
+ } else {
+ pr_err_ratelimited("Fast IPI fired. Acking.\n");
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ }
}
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
if (is_kernel_in_hyp_mode()) {
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
@@ -405,24 +547,23 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
- (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
- /*
- * Not supported yet, let's figure out how to handle this when
- * we implement these proprietary performance counters. For now,
- * just mask it and move on.
- */
- pr_err_ratelimited("PMC FIQ fired. Masking.\n");
- sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
- FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
+ if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ int irq;
+ if (cpumask_test_cpu(smp_processor_id(),
+ &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
+ irq = AIC_CPU_PMU_P;
+ else
+ irq = AIC_CPU_PMU_E;
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(irq));
}
if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
@@ -456,13 +597,29 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
irq_hw_number_t hw)
{
struct aic_irq_chip *ic = id->host_data;
+ u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
+ struct irq_chip *chip = &aic_chip;
- if (hw < ic->nr_hw) {
- irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
+ if (ic->info.version == 2)
+ chip = &aic2_chip;
+
+ if (type == AIC_EVENT_TYPE_IRQ) {
+ irq_domain_set_info(id, irq, hw, chip, id->host_data,
handle_fasteoi_irq, NULL, NULL);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
} else {
- irq_set_percpu_devid(irq);
+ int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
+
+ switch (fiq) {
+ case AIC_CPU_PMU_P:
+ case AIC_CPU_PMU_E:
+ irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
+ break;
+ default:
+ irq_set_percpu_devid(irq);
+ break;
+ }
+
irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
handle_percpu_devid_irq, NULL, NULL);
}
@@ -476,32 +633,46 @@ static int aic_irq_domain_translate(struct irq_domain *id,
unsigned int *type)
{
struct aic_irq_chip *ic = id->host_data;
+ u32 *args;
+ u32 die = 0;
- if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
+ if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
+ !is_of_node(fwspec->fwnode))
return -EINVAL;
+ args = &fwspec->param[1];
+
+ if (fwspec->param_count == 4) {
+ die = args[0];
+ args++;
+ }
+
switch (fwspec->param[0]) {
case AIC_IRQ:
- if (fwspec->param[1] >= ic->nr_hw)
+ if (die >= ic->nr_die)
return -EINVAL;
- *hwirq = fwspec->param[1];
+ if (args[0] >= ic->nr_irq)
+ return -EINVAL;
+ *hwirq = AIC_IRQ_HWIRQ(die, args[0]);
break;
case AIC_FIQ:
- if (fwspec->param[1] >= AIC_NR_FIQ)
+ if (die != 0)
+ return -EINVAL;
+ if (args[0] >= AIC_NR_FIQ)
return -EINVAL;
- *hwirq = ic->nr_hw + fwspec->param[1];
+ *hwirq = AIC_FIQ_HWIRQ(args[0]);
/*
* In EL1 the non-redirected registers are the guest's,
* not EL2's, so remap the hwirqs to match.
*/
if (!is_kernel_in_hyp_mode()) {
- switch (fwspec->param[1]) {
+ switch (args[0]) {
case AIC_TMR_GUEST_PHYS:
- *hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
break;
case AIC_TMR_GUEST_VIRT:
- *hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
break;
case AIC_TMR_HV_PHYS:
case AIC_TMR_HV_VIRT:
@@ -515,7 +686,7 @@ static int aic_irq_domain_translate(struct irq_domain *id,
return -EINVAL;
}
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ *type = args[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
@@ -564,6 +735,22 @@ static const struct irq_domain_ops aic_irq_domain_ops = {
* IPI irqchip
*/
+static void aic_ipi_send_fast(int cpu)
+{
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 my_mpidr = read_cpuid_mpidr();
+ u64 cluster = MPIDR_CLUSTER(mpidr);
+ u64 idx = MPIDR_CPU(mpidr);
+
+ if (MPIDR_CLUSTER(my_mpidr) == cluster)
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
+ SYS_IMP_APL_IPI_RR_LOCAL_EL1);
+ else
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
+ SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
+ isb();
+}
+
static void aic_ipi_mask(struct irq_data *d)
{
u32 irq_bit = BIT(irqd_to_hwirq(d));
@@ -589,8 +776,12 @@ static void aic_ipi_unmask(struct irq_data *d)
* If a pending vIPI was unmasked, raise a HW IPI to ourselves.
* No barriers needed here since this is a self-IPI.
*/
- if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
- aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
+ if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(smp_processor_id());
+ else
+ aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
+ }
}
static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
@@ -618,8 +809,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
smp_mb__after_atomic();
if (!(pending & irq_bit) &&
- (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
- send |= AIC_IPI_SEND_CPU(cpu);
+ (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(cpu);
+ else
+ send |= AIC_IPI_SEND_CPU(cpu);
+ }
}
/*
@@ -651,8 +846,16 @@ static void aic_handle_ipi(struct pt_regs *regs)
/*
* Ack the IPI. We need to order this after the AIC event read, but
* that is enforced by normal MMIO ordering guarantees.
+ *
+ * For the Fast IPI case, this needs to be ordered before the vIPI
+ * handling below, so we need to isb();
*/
- aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
+ if (static_branch_likely(&use_fast_ipi)) {
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ isb();
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
+ }
/*
* The mask read does not need to be ordered. Only we can change
@@ -680,7 +883,8 @@ static void aic_handle_ipi(struct pt_regs *regs)
* No ordering needed here; at worst this just changes the timing of
* when the next IPI will be delivered.
*/
- aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ if (!static_branch_likely(&use_fast_ipi))
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
@@ -767,20 +971,27 @@ static int aic_init_cpu(unsigned int cpu)
/* Commit all of the above */
isb();
- /*
- * Make sure the kernel's idea of logical CPU order is the same as AIC's
- * If we ever end up with a mismatch here, we will have to introduce
- * a mapping table similar to what other irqchip drivers do.
- */
- WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
+ if (aic_irqc->info.version == 1) {
+ /*
+ * Make sure the kernel's idea of logical CPU order is the same as AIC's
+ * If we ever end up with a mismatch here, we will have to introduce
+ * a mapping table similar to what other irqchip drivers do.
+ */
+ WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
- /*
- * Always keep IPIs unmasked at the hardware level (except auto-masking
- * by AIC during processing). We manage masks at the vIPI level.
- */
- aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
- aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
- aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ /*
+ * Always keep IPIs unmasked at the hardware level (except auto-masking
+ * by AIC during processing). We manage masks at the vIPI level.
+ * These registers only exist on AICv1, AICv2 always uses fast IPIs.
+ */
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ }
+ }
/* Initialize the local mask state */
__this_cpu_write(aic_fiq_unmasked, 0);
@@ -794,68 +1005,193 @@ static struct gic_kvm_info vgic_info __initdata = {
.no_hw_deactivation = true,
};
+static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
+{
+ int i, n;
+ u32 fiq;
+
+ if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
+ WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
+ return;
+
+ n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
+ if (WARN_ON(n < 0))
+ return;
+
+ ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
+ if (!ic->fiq_aff[fiq])
+ return;
+
+ for (i = 0; i < n; i++) {
+ struct device_node *cpu_node;
+ u32 cpu_phandle;
+ int cpu;
+
+ if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
+ continue;
+
+ cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
+ }
+}
+
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
- int i;
+ int i, die;
+ u32 off, start_off;
void __iomem *regs;
- u32 info;
struct aic_irq_chip *irqc;
+ struct device_node *affs;
+ const struct of_device_id *match;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
return -EIO;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
+ if (!irqc) {
+ iounmap(regs);
return -ENOMEM;
+ }
- aic_irqc = irqc;
irqc->base = regs;
- info = aic_ic_read(irqc, AIC_INFO);
- irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);
+ match = of_match_node(aic_info_match, node);
+ if (!match)
+ goto err_unmap;
- irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- irqc->nr_hw + AIC_NR_FIQ,
- &aic_irq_domain_ops, irqc);
- if (WARN_ON(!irqc->hw_domain)) {
- iounmap(irqc->base);
- kfree(irqc);
- return -ENODEV;
+ irqc->info = *(struct aic_info *)match->data;
+
+ aic_irqc = irqc;
+
+ switch (irqc->info.version) {
+ case 1: {
+ u32 info;
+
+ info = aic_ic_read(irqc, AIC_INFO);
+ irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
+ irqc->max_irq = AIC_MAX_IRQ;
+ irqc->nr_die = irqc->max_die = 1;
+
+ off = start_off = irqc->info.target_cpu;
+ off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */
+
+ irqc->event = irqc->base;
+
+ break;
}
+ case 2: {
+ u32 info1, info3;
+
+ info1 = aic_ic_read(irqc, AIC2_INFO1);
+ info3 = aic_ic_read(irqc, AIC2_INFO3);
+
+ irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
+ irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
+ irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
+ irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);
+
+ off = start_off = irqc->info.irq_cfg;
+ off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */
+
+ irqc->event = of_iomap(node, 1);
+ if (WARN_ON(!irqc->event))
+ goto err_unmap;
+
+ break;
+ }
+ }
+
+ irqc->info.sw_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
+ irqc->info.sw_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
+ irqc->info.mask_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
+ irqc->info.mask_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
+
+ if (irqc->info.fast_ipi)
+ static_branch_enable(&use_fast_ipi);
+ else
+ static_branch_disable(&use_fast_ipi);
+
+ irqc->info.die_stride = off - start_off;
+
+ irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
+ &aic_irq_domain_ops, irqc);
+ if (WARN_ON(!irqc->hw_domain))
+ goto err_unmap;
irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);
- if (aic_init_smp(irqc, node)) {
- irq_domain_remove(irqc->hw_domain);
- iounmap(irqc->base);
- kfree(irqc);
- return -ENODEV;
+ if (aic_init_smp(irqc, node))
+ goto err_remove_domain;
+
+ affs = of_get_child_by_name(node, "affinities");
+ if (affs) {
+ struct device_node *chld;
+
+ for_each_child_of_node(affs, chld)
+ build_fiq_affinity(irqc, chld);
}
set_handle_irq(aic_handle_irq);
set_handle_fiq(aic_handle_fiq);
- for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
- aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
- for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
- aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
- for (i = 0; i < irqc->nr_hw; i++)
- aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);
+ off = 0;
+ for (die = 0; die < irqc->nr_die; die++) {
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
+ if (irqc->info.target_cpu)
+ for (i = 0; i < irqc->nr_irq; i++)
+ aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
+ off += irqc->info.die_stride;
+ }
+
+ if (irqc->info.version == 2) {
+ u32 config = aic_ic_read(irqc, AIC2_CONFIG);
+
+ config |= AIC2_CONFIG_ENABLE;
+ aic_ic_write(irqc, AIC2_CONFIG, config);
+ }
if (!is_kernel_in_hyp_mode())
pr_info("Kernel running in EL1, mapping interrupts");
+ if (static_branch_likely(&use_fast_ipi))
+ pr_info("Using Fast IPIs");
+
cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
"irqchip/apple-aic/ipi:starting",
aic_init_cpu, NULL);
vgic_set_kvm_info(&vgic_info);
- pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
- irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);
+ pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
+ irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);
return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->hw_domain);
+err_unmap:
+ if (irqc->event && irqc->event != irqc->base)
+ iounmap(irqc->event);
+ iounmap(irqc->base);
+ kfree(irqc);
+ return -ENODEV;
}
-IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
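The AICv2 path above computes the register map at probe time instead of hard-coding offsets: each block (SW_SET, SW_CLR, MASK_SET, MASK_CLR, HW_STATE) is max_irq/32 words long, and die_stride is the distance between two dies' copies of that layout. A minimal sketch of the resulting address arithmetic, with illustrative names (aic_layout and aic_mask_reg are not the driver's own):

#include <linux/types.h>

struct aic_layout {
        u32 mask_set;           /* offset of die 0's MASK_SET block */
        u32 die_stride;         /* bytes between consecutive dies' register maps */
};

/* Byte offset of the MASK_SET word that covers @hwirq on @die (sketch only). */
static u32 aic_mask_reg(const struct aic_layout *l, u32 die, u32 hwirq)
{
        return l->mask_set + die * l->die_stride + (hwirq >> 5) * sizeof(u32);
}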
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 5cc268880f8e..46a3aa60e50e 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d25b7a864bbb..cd772973114a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4856,6 +4856,38 @@ static struct syscore_ops its_syscore_ops = {
.resume = its_restore_enable,
};
+static void __init __iomem *its_map_one(struct resource *res, int *err)
+{
+ void __iomem *its_base;
+ u32 val;
+
+ its_base = ioremap(res->start, SZ_64K);
+ if (!its_base) {
+ pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
+ *err = -ENODEV;
+ goto out_unmap;
+ }
+
+ *err = its_force_quiescent(its_base);
+ if (*err) {
+ pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
+ goto out_unmap;
+ }
+
+ return its_base;
+
+out_unmap:
+ iounmap(its_base);
+ return NULL;
+}
+
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
struct irq_domain *inner_domain;
@@ -4963,29 +4995,14 @@ static int __init its_probe_one(struct resource *res,
{
struct its_node *its;
void __iomem *its_base;
- u32 val, ctlr;
u64 baser, tmp, typer;
struct page *page;
+ u32 ctlr;
int err;
- its_base = ioremap(res->start, SZ_64K);
- if (!its_base) {
- pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
- return -ENOMEM;
- }
-
- val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
- if (val != 0x30 && val != 0x40) {
- pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
- err = -ENODEV;
- goto out_unmap;
- }
-
- err = its_force_quiescent(its_base);
- if (err) {
- pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
- goto out_unmap;
- }
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
pr_info("ITS %pR\n", res);
@@ -5241,13 +5258,31 @@ static int its_cpu_memreserve_lpi(unsigned int cpu)
out:
/* Last CPU being brought up gets to issue the cleanup */
- if (cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
+ if (!IS_ENABLED(CONFIG_SMP) ||
+ cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
return ret;
}
+/* Mark all the BASER registers as invalid before they get reprogrammed */
+static int __init its_reset_one(struct resource *res)
+{
+ void __iomem *its_base;
+ int err, i;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++)
+ gits_write_baser(0, its_base + GITS_BASER + (i << 3));
+
+ iounmap(its_base);
+ return 0;
+}
+
static const struct of_device_id its_device_id[] = {
{ .compatible = "arm,gic-v3-its", },
{},
@@ -5258,6 +5293,26 @@ static int __init its_of_probe(struct device_node *node)
struct device_node *np;
struct resource res;
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ int err;
+
+ if (!of_device_is_available(np) ||
+ !of_property_read_bool(np, "msi-controller") ||
+ of_address_to_resource(np, 0, &res))
+ continue;
+
+ err = its_reset_one(&res);
+ if (err)
+ return err;
+ }
+
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
@@ -5420,11 +5475,35 @@ dom_err:
return err;
}
+static int __init its_acpi_reset(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct resource res;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ res = (struct resource) {
+ .start = its_entry->base_address,
+ .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return its_reset_one(&res);
+}
+
static void __init its_acpi_probe(void)
{
acpi_table_parse_srat_its();
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- gic_acpi_parse_madt_its, 0);
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_acpi_reset, 0) > 0)
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ gic_acpi_parse_madt_its, 0);
acpi_its_srat_maps_free();
}
#else
@@ -5438,6 +5517,9 @@ int __init its_lpi_memreserve_init(void)
if (!efi_enabled(EFI_CONFIG_TABLES))
return 0;
+ if (list_empty(&its_nodes))
+ return 0;
+
gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"irqchip/arm/gicv3/memreserve:online",
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5e935d97207d..0efe1a9a9f3b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1211,7 +1211,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- wmb();
+ dsb(ishst);
for_each_cpu(cpu, mask) {
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b8bb46c65a97..58ba835bee1f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -34,6 +34,7 @@
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
@@ -66,7 +67,6 @@ union gic_base {
};
struct gic_chip_data {
- struct irq_chip chip;
union gic_base dist_base;
union gic_base cpu_base;
void __iomem *raw_dist_base;
@@ -397,18 +397,15 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static const struct irq_chip gic_chip = {
- .irq_mask = gic_mask_irq,
- .irq_unmask = gic_unmask_irq,
- .irq_eoi = gic_eoi_irq,
- .irq_set_type = gic_set_type,
- .irq_retrigger = gic_retrigger,
- .irq_get_irqchip_state = gic_irq_get_irqchip_state,
- .irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .flags = IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
-};
+static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+
+ if (gic->domain->dev)
+		seq_printf(p, "%s", gic->domain->dev->of_node->name);
+ else
+ seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
+}
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
@@ -799,8 +796,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
unsigned int cpu;
+ if (unlikely(gic != &gic_data[0]))
+ return -EINVAL;
+
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
else
@@ -880,6 +881,39 @@ static __init void gic_smp_init(void)
#define gic_ipi_send_mask NULL
#endif
+static const struct irq_chip gic_chip = {
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_print_chip = gic_irq_print_chip,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gic_chip_mode1 = {
+ .name = "GICv2",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
#ifdef CONFIG_BL_SWITCHER
/*
* gic_send_sgi - send a SGI directly to given CPU interface number
@@ -1024,15 +1058,19 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
struct gic_chip_data *gic = d->host_data;
struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+ const struct irq_chip *chip;
+
+ chip = (static_branch_likely(&supports_deactivate_key) &&
+ gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;
switch (hw) {
case 0 ... 31:
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
break;
default:
- irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
irqd_set_single_target(irqd);
@@ -1127,26 +1165,6 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
- const char *name, bool use_eoimode1)
-{
- /* Initialize irq_chip */
- gic->chip = gic_chip;
- gic->chip.name = name;
- gic->chip.parent_device = dev;
-
- if (use_eoimode1) {
- gic->chip.irq_mask = gic_eoimode1_mask_irq;
- gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
- gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
- }
-
- if (gic == &gic_data[0]) {
- gic->chip.irq_set_affinity = gic_set_affinity;
- gic->chip.ipi_send_mask = gic_ipi_send_mask;
- }
-}
-
static int gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
@@ -1246,7 +1264,6 @@ error:
static int __init __gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
- char *name;
int i, ret;
if (WARN_ON(!gic || gic->domain))
@@ -1266,18 +1283,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
- if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
- name = kasprintf(GFP_KERNEL, "GICv2");
- gic_init_chip(gic, NULL, name, true);
- } else {
- name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
- gic_init_chip(gic, NULL, name, false);
- }
-
ret = gic_init_bases(gic, handle);
- if (ret)
- kfree(name);
- else if (gic == &gic_data[0])
+ if (gic == &gic_data[0])
gic_smp_init();
return ret;
@@ -1460,8 +1467,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
if (!*gic)
return -ENOMEM;
- gic_init_chip(*gic, dev, dev->of_node->name, false);
-
ret = gic_of_setup(*gic, dev->of_node);
if (ret)
return ret;
@@ -1472,6 +1477,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
return ret;
}
+ irq_domain_set_pm_device((*gic)->domain, dev);
irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
return 0;
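The GICv2 rework above is one instance of a pattern repeated through the rest of this series (imx-intmux, lpc32xx, mvebu-pic, ts4800, versatile-fpga): drop the writable per-instance copy of struct irq_chip in favour of a single const chip whose .irq_print_chip callback recovers the instance name from the chip data at print time. A minimal sketch of that shape, with illustrative names (my_chip_data, my_print_chip and my_chip are not from any of these drivers):

#include <linux/irq.h>
#include <linux/seq_file.h>

struct my_chip_data {
        const char *label;      /* per-instance name, filled in at probe time */
};

static void my_print_chip(struct irq_data *d, struct seq_file *p)
{
        struct my_chip_data *data = irq_data_get_irq_chip_data(d);

        seq_printf(p, "%s", data->label);
}

static const struct irq_chip my_chip = {
        .irq_print_chip = my_print_chip,
        /* .irq_mask, .irq_unmask, ... as usual */
};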
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index e86ff743e98c..80aaea82468a 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -61,7 +61,6 @@
#define CHAN_MAX_NUM 0x8
struct intmux_irqchip_data {
- struct irq_chip chip;
u32 saved_reg;
int chanidx;
int irq;
@@ -114,7 +113,7 @@ static void imx_intmux_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
-static struct irq_chip imx_intmux_irq_chip = {
+static struct irq_chip imx_intmux_irq_chip __ro_after_init = {
.name = "intmux",
.irq_mask = imx_intmux_irq_mask,
.irq_unmask = imx_intmux_irq_unmask,
@@ -126,7 +125,7 @@ static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
struct intmux_irqchip_data *data = h->host_data;
irq_set_chip_data(irq, data);
- irq_set_chip_and_handler(irq, &data->chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
return 0;
}
@@ -241,8 +240,6 @@ static int imx_intmux_probe(struct platform_device *pdev)
}
for (i = 0; i < channum; i++) {
- data->irqchip_data[i].chip = imx_intmux_irq_chip;
- data->irqchip_data[i].chip.parent_device = &pdev->dev;
data->irqchip_data[i].chanidx = i;
data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
@@ -260,6 +257,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
goto out;
}
data->irqchip_data[i].domain = domain;
+ irq_domain_set_pm_device(domain, &pdev->dev);
/* disable all interrupt sources of this channel firstly */
writel_relaxed(0, data->regs + CHANIER(i));
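irq_domain_set_pm_device(), used just above and again in the Renesas patches further down, replaces the old chip.parent_device pointer: the runtime-PM device is attached to the irq domain once at probe time. A minimal usage sketch under the assumption of a simple platform driver (my_domain_ops, my_probe and the domain size of 32 are illustrative):

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

static const struct irq_domain_ops my_domain_ops = {
        .xlate = irq_domain_xlate_onecell,
};

static int my_probe(struct platform_device *pdev)
{
        struct irq_domain *domain;

        domain = irq_domain_add_linear(pdev->dev.of_node, 32, &my_domain_ops, NULL);
        if (!domain)
                return -ENOMEM;

        /* Runtime-PM the parent device whenever an irq in this domain is in use. */
        irq_domain_set_pm_device(domain, &pdev->dev);

        return 0;
}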
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 32562b7e681b..e3801c4a77ed 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -241,7 +241,7 @@ static int pch_msi_init(struct device_node *node,
return 0;
err_map:
- kfree(priv->msi_map);
+ bitmap_free(priv->msi_map);
err_priv:
kfree(priv);
return ret;
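The one-line Loongson fix keeps the allocation API paired: a map obtained with bitmap_zalloc() should be released with bitmap_free() rather than kfree(). A minimal sketch of the pairing (alloc_msi_map and free_msi_map are illustrative helpers, not the driver's functions):

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Illustrative pairing only: what bitmap_zalloc() gives, bitmap_free() takes back. */
static unsigned long *alloc_msi_map(unsigned int num_msi)
{
        return bitmap_zalloc(num_msi, GFP_KERNEL);
}

static void free_msi_map(unsigned long *msi_map)
{
        bitmap_free(msi_map);   /* not kfree(): keep the bitmap API paired */
}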
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index a29357f39450..4d70a857133f 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/exception.h>
@@ -25,8 +26,8 @@
struct lpc32xx_irq_chip {
void __iomem *base;
+ phys_addr_t addr;
struct irq_domain *domain;
- struct irq_chip chip;
};
static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
@@ -118,6 +119,24 @@ static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ if (ic == lpc32xx_mic_irqc)
+ seq_printf(p, "%08x.mic", ic->addr);
+ else
+ seq_printf(p, "%08x.sic", ic->addr);
+}
+
+static const struct irq_chip lpc32xx_chip = {
+ .irq_ack = lpc32xx_irq_ack,
+ .irq_mask = lpc32xx_irq_mask,
+ .irq_unmask = lpc32xx_irq_unmask,
+ .irq_set_type = lpc32xx_irq_set_type,
+ .irq_print_chip = lpc32xx_irq_print_chip,
+};
+
static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
{
struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
@@ -153,7 +172,7 @@ static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
struct lpc32xx_irq_chip *ic = id->host_data;
irq_set_chip_data(virq, ic);
- irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_noprobe(virq);
@@ -183,6 +202,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
if (!irqc)
return -ENOMEM;
+ irqc->addr = addr;
irqc->base = of_iomap(node, 0);
if (!irqc->base) {
pr_err("%pOF: unable to map registers\n", node);
@@ -190,21 +210,11 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
return -EINVAL;
}
- irqc->chip.irq_ack = lpc32xx_irq_ack;
- irqc->chip.irq_mask = lpc32xx_irq_mask;
- irqc->chip.irq_unmask = lpc32xx_irq_unmask;
- irqc->chip.irq_set_type = lpc32xx_irq_set_type;
- if (is_mic)
- irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
- else
- irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
-
irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
&lpc32xx_irq_domain_ops, irqc);
if (!irqc->domain) {
pr_err("unable to add irq domain\n");
iounmap(irqc->base);
- kfree(irqc->chip.name);
kfree(irqc);
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index d90ff0b92480..2aaa9aad3e87 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -16,7 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
-#define NUM_CHANNEL 8
+#define MAX_NUM_CHANNEL 64
#define MAX_INPUT_MUX 256
#define REG_EDGE_POL 0x00
@@ -26,6 +26,8 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
+/* Used for s4 chips */
+#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -51,15 +53,22 @@ static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
unsigned int channel,
unsigned long hwirq);
static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
struct irq_ctl_ops {
void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
unsigned int channel, unsigned long hwirq);
void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+ int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
};
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
+ unsigned int nr_channels;
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
@@ -68,28 +77,44 @@ struct meson_gpio_irq_params {
struct irq_ctl_ops ops;
};
-#define INIT_MESON_COMMON(irqs, init, sel) \
+#define INIT_MESON_COMMON(irqs, init, sel, type) \
.nr_hwirq = irqs, \
.ops = { \
.gpio_irq_init = init, \
.gpio_irq_sel_pin = sel, \
+ .gpio_irq_set_type = type, \
},
#define INIT_MESON8_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
- meson8_gpio_irq_sel_pin) \
+ meson8_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
.edge_single_offset = 0, \
.pol_low_offset = 16, \
.pin_sel_mask = 0xff, \
+ .nr_channels = 8, \
#define INIT_MESON_A1_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
- meson_a1_gpio_irq_sel_pin) \
+ meson_a1_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
.support_edge_both = true, \
.edge_both_offset = 16, \
.edge_single_offset = 8, \
.pol_low_offset = 0, \
.pin_sel_mask = 0x7f, \
+ .nr_channels = 8, \
+
+#define INIT_MESON_S4_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 12, \
static const struct meson_gpio_irq_params meson8_params = {
INIT_MESON8_COMMON_DATA(134)
@@ -121,6 +146,10 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
+static const struct meson_gpio_irq_params s4_params = {
+ INIT_MESON_S4_COMMON_DATA(82)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -130,14 +159,15 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
+ { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ }
};
struct meson_gpio_irq_controller {
const struct meson_gpio_irq_params *params;
void __iomem *base;
- u32 channel_irqs[NUM_CHANNEL];
- DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+ u32 channel_irqs[MAX_NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
spinlock_t lock;
};
@@ -207,8 +237,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
- idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
- if (idx >= NUM_CHANNEL) {
+ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+ if (idx >= ctl->params->nr_channels) {
spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
@@ -256,9 +286,8 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
clear_bit(idx, ctl->channel_map);
}
-static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
- unsigned int type,
- u32 *channel_hwirq)
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
{
u32 val = 0;
unsigned int idx;
@@ -299,6 +328,51 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
return 0;
}
+/*
+ * GPIO IRQ related registers for S4
+ * -PADCTRL_GPIO_IRQ_CTRL0
+ * bit[31]: enable/disable all the irq lines
+ * bit[12-23]: single edge trigger
+ * bit[0-11]: polarity trigger
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[X]
+ * bit[0-6]: 7 bits to choose the gpio source for irq line 2*[X] - 2
+ * bit[16-22]: 7 bits to choose the gpio source for irq line 2*[X] - 1
+ * where X = 1-6
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[7]
+ * bit[0-11]: both edge trigger
+ */
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ val |= BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
+ BIT(ctl->params->edge_both_offset + idx), val);
+ return 0;
+ }
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->pol_low_offset + idx);
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->edge_single_offset + idx);
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ BIT(idx) | BIT(12 + idx), val);
+ return 0;
+};
+
static unsigned int meson_gpio_irq_type_output(unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
@@ -323,7 +397,7 @@ static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
int ret;
- ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+ ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq);
if (ret)
return ret;
@@ -450,10 +524,10 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i
ret = of_property_read_variable_u32_array(node,
"amlogic,channel-interrupts",
ctl->channel_irqs,
- NUM_CHANNEL,
- NUM_CHANNEL);
+ ctl->params->nr_channels,
+ ctl->params->nr_channels);
if (ret < 0) {
- pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+ pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels);
return ret;
}
@@ -507,7 +581,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
}
pr_info("%d to %d gpio interrupt mux initialized\n",
- ctl->params->nr_hwirq, NUM_CHANNEL);
+ ctl->params->nr_hwirq, ctl->params->nr_channels);
return 0;
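The Meson rework above moves both the channel count and the trigger-type programming into the per-SoC match data, which is what lets the S4 (12 channels and its own REG_EDGE_POL_S4 register) share the driver with the older 8-channel variants. A condensed sketch of that per-variant dispatch (my_params, my_ctl and the callback signature are illustrative, not the driver's types):

#include <linux/errno.h>
#include <linux/io.h>

/* Per-variant description, normally selected through the OF match table. */
struct my_params {
        unsigned int nr_channels;
        int (*set_type)(void __iomem *base, unsigned int idx, unsigned int type);
};

struct my_ctl {
        void __iomem *base;
        const struct my_params *params;
};

static int my_set_type(struct my_ctl *ctl, unsigned int idx, unsigned int type)
{
        if (idx >= ctl->params->nr_channels)
                return -EINVAL;

        /* Defer to whichever variant hook the match data provided. */
        return ctl->params->set_type(ctl->base, idx, type);
}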
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index 870f9866b8da..ef3d3646ccc2 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#define PIC_CAUSE 0x0
#define PIC_MASK 0x4
@@ -29,7 +30,7 @@ struct mvebu_pic {
void __iomem *base;
u32 parent_irq;
struct irq_domain *domain;
- struct irq_chip irq_chip;
+ struct platform_device *pdev;
};
static void mvebu_pic_reset(struct mvebu_pic *pic)
@@ -66,6 +67,20 @@ static void mvebu_pic_unmask_irq(struct irq_data *d)
writel(reg, pic->base + PIC_MASK);
}
+static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+
+	seq_printf(p, "%s", dev_name(&pic->pdev->dev));
+}
+
+static const struct irq_chip mvebu_pic_chip = {
+ .irq_mask = mvebu_pic_mask_irq,
+ .irq_unmask = mvebu_pic_unmask_irq,
+ .irq_eoi = mvebu_pic_eoi_irq,
+ .irq_print_chip = mvebu_pic_print_chip,
+};
+
static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
@@ -73,8 +88,7 @@ static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
irq_set_percpu_devid(virq);
irq_set_chip_data(virq, pic);
- irq_set_chip_and_handler(virq, &pic->irq_chip,
- handle_percpu_devid_irq);
+ irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_probe(virq);
@@ -120,22 +134,16 @@ static int mvebu_pic_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct mvebu_pic *pic;
- struct irq_chip *irq_chip;
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
if (!pic)
return -ENOMEM;
+ pic->pdev = pdev;
pic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pic->base))
return PTR_ERR(pic->base);
- irq_chip = &pic->irq_chip;
- irq_chip->name = dev_name(&pdev->dev);
- irq_chip->irq_mask = mvebu_pic_mask_irq;
- irq_chip->irq_unmask = mvebu_pic_unmask_irq;
- irq_chip->irq_eoi = mvebu_pic_eoi_irq;
-
pic->parent_irq = irq_of_parse_and_map(node, 0);
if (pic->parent_irq <= 0) {
dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index ba4759b3e269..94230306e0ee 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -107,6 +107,7 @@ static int __init nvic_of_init(struct device_node *node,
if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
+ iounmap(nvic_base);
return -ENOMEM;
}
@@ -116,6 +117,7 @@ static int __init nvic_of_init(struct device_node *node,
if (ret) {
pr_warn("Failed to allocate irq chips\n");
irq_domain_remove(nvic_irq_domain);
+ iounmap(nvic_base);
return ret;
}
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
new file mode 100644
index 000000000000..eea5a753618c
--- /dev/null
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt
+ * controller, which is commonly found on Qualcomm SoCs built on the RPM
+ * architecture. Sitting in the always-on domain, the MPM monitors wakeup
+ * interrupts while the SoC is asleep and wakes up the AP when one of those
+ * interrupts occurs. This driver doesn't access the physical MPM registers
+ * directly, though. Instead, the access is bridged via a piece of internal
+ * memory (SRAM) that is accessible to both the AP and the RPM. This piece of
+ * memory is called 'vMPM' in the driver.
+ *
+ * While the SoC is awake, the vMPM is owned by the AP and all register setup
+ * done by this driver happens on the vMPM. When the AP is about to be power
+ * collapsed, the driver sends a mailbox notification to the RPM, which takes
+ * over vMPM ownership and dumps the vMPM into the physical MPM registers. On
+ * wakeup, the AP is woken up by an MPM pin/interrupt, and the RPM copies the
+ * STATUS registers into the vMPM. The AP then owns the vMPM again.
+ *
+ * vMPM register map:
+ *
+ * 31 0
+ * +--------------------------------+
+ * | TIMER0 | 0x00
+ * +--------------------------------+
+ * | TIMER1 | 0x04
+ * +--------------------------------+
+ * | ENABLE0 | 0x08
+ * +--------------------------------+
+ * | ... | ...
+ * +--------------------------------+
+ * | ENABLEn |
+ * +--------------------------------+
+ * | FALLING_EDGE0 |
+ * +--------------------------------+
+ * | ... |
+ * +--------------------------------+
+ * | STATUSn |
+ * +--------------------------------+
+ *
+ * n = DIV_ROUND_UP(pin_cnt, 32)
+ *
+ */
+
+#define MPM_REG_ENABLE 0
+#define MPM_REG_FALLING_EDGE 1
+#define MPM_REG_RISING_EDGE 2
+#define MPM_REG_POLARITY 3
+#define MPM_REG_STATUS 4
+
+/* MPM pin map to GIC hwirq */
+struct mpm_gic_map {
+ int pin;
+ irq_hw_number_t hwirq;
+};
+
+struct qcom_mpm_priv {
+ void __iomem *base;
+ raw_spinlock_t lock;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+ struct mpm_gic_map *maps;
+ unsigned int map_cnt;
+ unsigned int reg_stride;
+ struct irq_domain *domain;
+ struct generic_pm_domain genpd;
+};
+
+static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ return readl_relaxed(priv->base + offset);
+}
+
+static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index, u32 val)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ writel_relaxed(val, priv->base + offset);
+
+ /* Ensure the write is completed */
+ wmb();
+}
+
+static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
+ __assign_bit(shift, &val, en);
+ qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void qcom_mpm_mask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, false);
+
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+}
+
+static void qcom_mpm_unmask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, true);
+
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+}
+
+static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
+ unsigned int index, unsigned int shift)
+{
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, reg, index);
+ __assign_bit(shift, &val, set);
+ qcom_mpm_write(priv, reg, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);
+
+ if (!d->parent_data)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_mpm_chip = {
+ .name = "mpm",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = qcom_mpm_mask,
+ .irq_unmask = qcom_mpm_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = qcom_mpm_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
+{
+ struct mpm_gic_map *maps = priv->maps;
+ int i;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ if (maps[i].pin == pin)
+ return &maps[i];
+ }
+
+ return NULL;
+}
+
+static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct qcom_mpm_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct mpm_gic_map *map;
+ irq_hw_number_t pin;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
+ &qcom_mpm_chip, priv);
+ if (ret)
+ return ret;
+
+ map = get_mpm_gic_map(priv, pin);
+ if (map == NULL)
+ return irq_domain_disconnect_hierarchy(domain->parent, virq);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = map->hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_mpm_ops = {
+ .alloc = qcom_mpm_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+/* Triggered by RPM when system resumes from deep sleep */
+static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
+{
+ struct qcom_mpm_priv *priv = dev_id;
+ unsigned long enable, pending;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ raw_spin_lock_irqsave(&priv->lock, flags);
+ enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
+ pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
+ pending &= enable;
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ for_each_set_bit(j, &pending, 32) {
+ unsigned int pin = 32 * i + j;
+ struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
+ struct irq_data *d = &desc->irq_data;
+
+ if (!irqd_is_level_type(d))
+ irq_set_irqchip_state(d->irq,
+ IRQCHIP_STATE_PENDING, true);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static int mpm_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
+ genpd);
+ int i, ret;
+
+ for (i = 0; i < priv->reg_stride; i++)
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+
+ /* Notify RPM to write vMPM into HW */
+ ret = mbox_send_message(priv->mbox_chan, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++)
+ if (maps[i].hwirq == hwirq)
+ return true;
+
+ return false;
+}
+
+static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
+{
+ struct platform_device *pdev = of_find_device_by_node(np);
+ struct device *dev = &pdev->dev;
+ struct irq_domain *parent_domain;
+ struct generic_pm_domain *genpd;
+ struct qcom_mpm_priv *priv;
+ unsigned int pin_cnt;
+ int i, irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
+ if (ret) {
+ dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
+ return ret;
+ }
+
+ priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);
+
+ ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
+ if (ret < 0) {
+ dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
+ return ret;
+ }
+
+ if (ret % 2) {
+ dev_err(dev, "invalid qcom,mpm-pin-map\n");
+ return -EINVAL;
+ }
+
+ priv->map_cnt = ret / 2;
+ priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
+ GFP_KERNEL);
+ if (!priv->maps)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ u32 pin, hwirq;
+
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);
+
+ if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
+ dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
+ pin, hwirq);
+ continue;
+ }
+
+ priv->maps[i].pin = pin;
+ priv->maps[i].hwirq = hwirq;
+ }
+
+ raw_spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ genpd = &priv->genpd;
+ genpd->flags = GENPD_FLAG_IRQ_SAFE;
+ genpd->power_off = mpm_pd_power_off;
+
+ genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
+ if (!genpd->name)
+ return -ENOMEM;
+
+ ret = pm_genpd_init(genpd, NULL, false);
+ if (ret) {
+ dev_err(dev, "failed to init genpd: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(np, genpd);
+ if (ret) {
+ dev_err(dev, "failed to add genpd provider: %d\n", ret);
+ goto remove_genpd;
+ }
+
+ priv->mbox_client.dev = dev;
+ priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
+ if (IS_ERR(priv->mbox_chan)) {
+ ret = PTR_ERR(priv->mbox_chan);
+ dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
+ return ret;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(dev, "failed to find MPM parent domain\n");
+ ret = -ENXIO;
+ goto free_mbox;
+ }
+
+ priv->domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
+ of_node_to_fwnode(np), &qcom_mpm_ops, priv);
+ if (!priv->domain) {
+ dev_err(dev, "failed to create MPM domain\n");
+ ret = -ENOMEM;
+ goto free_mbox;
+ }
+
+ irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
+ "qcom_mpm", priv);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto remove_domain;
+ }
+
+ return 0;
+
+remove_domain:
+ irq_domain_remove(priv->domain);
+free_mbox:
+ mbox_free_channel(priv->mbox_chan);
+remove_genpd:
+ pm_genpd_remove(genpd);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
+IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
+MODULE_LICENSE("GPL v2");
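Tying the new MPM driver's register helpers back to the vMPM map in its header comment: words 0 and 1 are TIMER0/TIMER1, and each logical register type then occupies reg_stride = DIV_ROUND_UP(pin_cnt, 32) consecutive words, which is why qcom_mpm_read()/qcom_mpm_write() use (reg * reg_stride + index + 2) * 4. A small worked sketch of that arithmetic (the 96-pin value is only an example):

#include <linux/kernel.h>

/*
 * Mirrors the driver's offset math: vMPM words 0 and 1 are TIMER0/TIMER1, and
 * every register type then occupies reg_stride consecutive 32-bit words.
 */
static unsigned int vmpm_offset(unsigned int reg, unsigned int index,
                                unsigned int pin_cnt)
{
        unsigned int stride = DIV_ROUND_UP(pin_cnt, 32);

        return (reg * stride + index + 2) * 4;
}

/*
 * Example for a hypothetical 96-pin MPM: stride = 3, so STATUS (reg = 4)
 * word 1 sits at (4 * 3 + 1 + 2) * 4 = 0x3c bytes into the vMPM.
 */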
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index fd9f275592d2..50a56820c99b 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -62,7 +62,7 @@ static struct irq_chip realtek_ictl_irq = {
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- irq_set_chip_and_handler(hw, &realtek_ictl_irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
return 0;
}
@@ -76,16 +76,20 @@ static void realtek_irq_dispatch(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *domain;
- unsigned int pending;
+ unsigned long pending;
+ unsigned int soc_int;
chained_irq_enter(chip, desc);
pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR));
+
if (unlikely(!pending)) {
spurious_interrupt();
goto out;
}
+
domain = irq_desc_get_handler_data(desc);
- generic_handle_domain_irq(domain, __ffs(pending));
+ for_each_set_bit(soc_int, &pending, 32)
+ generic_handle_domain_irq(domain, soc_int);
out:
chained_irq_exit(chip, desc);
@@ -95,7 +99,8 @@ out:
* SoC interrupts are cascaded to MIPS CPU interrupts according to the
* interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
* the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
- * thus go into 4 IRRs.
+ * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
+ * disconnected. Routing values {1..15} connect to output lines {0..14}.
*/
static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
{
@@ -134,7 +139,7 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
of_node_put(cpu_ictl);
cpu_int = be32_to_cpup(imap + 2);
- if (cpu_int > 7)
+ if (cpu_int > 7 || cpu_int < 2)
return -EINVAL;
if (!(mips_irqs_set & BIT(cpu_int))) {
@@ -143,7 +148,8 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
mips_irqs_set |= BIT(cpu_int);
}
- regs[(soc_int * 4) / 32] |= cpu_int << (soc_int * 4) % 32;
+ /* Use routing values (1..6) for CPU interrupts (2..7) */
+ regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
imap += 3;
}
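To make the Realtek routing fix concrete: each SoC interrupt owns a 4-bit field in one of the four Interrupt Routing Registers, and the value programmed is the CPU interrupt number minus one, with 0 meaning not routed. A worked sketch of the field placement (route_soc_int and the example numbers are illustrative):

#include <linux/types.h>

/*
 * Illustrative only: each SoC interrupt owns a 4-bit routing field, and the
 * value programmed is the CPU interrupt number minus one (0 = not routed).
 */
static void route_soc_int(u32 *irr, unsigned int soc_int, unsigned int cpu_int)
{
        irr[(soc_int * 4) / 32] |= (cpu_int - 1) << ((soc_int * 4) % 32);
}

/*
 * Routing SoC interrupt 10 to CPU interrupt 5, route_soc_int(irr, 10, 5),
 * writes the value 4 into IRR1 bits 8..11.
 */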
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 37f9a4499fdb..e83756aca14e 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -508,7 +508,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
irq_chip = &p->irq_chip;
irq_chip->name = "intc-irqpin";
- irq_chip->parent_device = dev;
irq_chip->irq_mask = disable_fn;
irq_chip->irq_unmask = enable_fn;
irq_chip->irq_set_type = intc_irqpin_irq_set_type;
@@ -523,6 +522,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
goto err0;
}
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
if (p->shared_irqs) {
/* request one shared interrupt */
if (devm_request_irq(dev, p->irq[0].requested_irq,
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 909325f88239..1ee5e9941f67 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -188,13 +188,14 @@ static int irqc_probe(struct platform_device *pdev)
p->gc->reg_base = p->cpu_int_base;
p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
- p->gc->chip_types[0].chip.parent_device = dev;
p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
/* request interrupts one by one */
for (k = 0; k < p->number_of_irqs; k++) {
if (devm_request_irq(dev, p->irq[k].requested_irq,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 259065d271ef..bb87e4c3b88e 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -44,8 +44,8 @@
* Each hart context has a vector of interrupt enable bits associated with it.
* There's one bit for each interrupt source.
*/
-#define ENABLE_BASE 0x2000
-#define ENABLE_PER_HART 0x80
+#define CONTEXT_ENABLE_BASE 0x2000
+#define CONTEXT_ENABLE_SIZE 0x80
/*
* Each hart context has a set of control registers associated with it. Right
@@ -53,7 +53,7 @@
* take an interrupt, and a register to claim interrupts.
*/
#define CONTEXT_BASE 0x200000
-#define CONTEXT_PER_HART 0x1000
+#define CONTEXT_SIZE 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
@@ -81,17 +81,21 @@ static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void plic_toggle(struct plic_handler *handler,
- int hwirq, int enable)
+static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
- u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
+ u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
+}
+
+static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
+{
+ raw_spin_lock(&handler->enable_lock);
+ __plic_toggle(handler->enable_base, hwirq, enable);
raw_spin_unlock(&handler->enable_lock);
}
@@ -324,8 +328,18 @@ static int __init plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != RV_IRQ_EXT)
+ if (parent.args[0] != RV_IRQ_EXT) {
+ /* Disable S-mode enable bits if running in M-mode. */
+ if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
+ void __iomem *enable_base = priv->regs +
+ CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
+
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ __plic_toggle(enable_base, hwirq, 0);
+ }
continue;
+ }
hartid = riscv_of_parent_hartid(parent.np);
if (hartid < 0) {
@@ -361,11 +375,11 @@ static int __init plic_init(struct device_node *node,
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
- handler->hart_base =
- priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ handler->hart_base = priv->regs + CONTEXT_BASE +
+ i * CONTEXT_SIZE;
raw_spin_lock_init(&handler->enable_lock);
- handler->enable_base =
- priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
@@ -398,3 +412,4 @@ out_free_priv:
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
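The PLIC rename from *_PER_HART to CONTEXT_*_SIZE reflects that these register blocks are per context rather than per hart, and the new lock-free __plic_toggle() lets the M-mode path above clear S-mode enable bits for contexts it otherwise skips. A sketch of the per-context window addressing those constants describe (context_enable_base and context_ctrl_base are illustrative helpers):

#include <linux/io.h>

#define CONTEXT_ENABLE_BASE     0x2000
#define CONTEXT_ENABLE_SIZE     0x80
#define CONTEXT_BASE            0x200000
#define CONTEXT_SIZE            0x1000

/* Per-context register windows, indexed by the DT context number (sketch only). */
static void __iomem *context_enable_base(void __iomem *regs, unsigned int ctx)
{
        return regs + CONTEXT_ENABLE_BASE + ctx * CONTEXT_ENABLE_SIZE;
}

static void __iomem *context_ctrl_base(void __iomem *regs, unsigned int ctx)
{
        return regs + CONTEXT_BASE + ctx * CONTEXT_SIZE;
}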
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b7cb2da71888..9d18f47040eb 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -214,6 +214,48 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip },
};
+static const struct stm32_desc_irq stm32mp13_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
+ { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
+ { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
+ { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
+ { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
+ { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip },
+ { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip },
+ { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip },
+ { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip },
+ { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip },
+ { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip },
+ { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip },
+ { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip },
+ { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip },
+ { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip },
+ { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip },
+ { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
+ { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct },
+};
+
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.exti_banks = stm32mp1_exti_banks,
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
@@ -221,6 +263,13 @@ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};
+static const struct stm32_exti_drv_data stm32mp13_drv_data = {
+ .exti_banks = stm32mp1_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+ .desc_irqs = stm32mp13_desc_irq,
+ .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq),
+};
+
static const struct
stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data,
irq_hw_number_t hwirq)
@@ -922,6 +971,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
/* platform driver only for MP1 */
static const struct of_device_id stm32_exti_ids[] = {
{ .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
{},
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index f032db23b30f..b2d61d4f6fe6 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -19,14 +19,15 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#define IRQ_MASK 0x4
#define IRQ_STATUS 0x8
struct ts4800_irq_data {
void __iomem *base;
+ struct platform_device *pdev;
struct irq_domain *domain;
- struct irq_chip irq_chip;
};
static void ts4800_irq_mask(struct irq_data *d)
@@ -47,12 +48,25 @@ static void ts4800_irq_unmask(struct irq_data *d)
writew(reg & ~mask, data->base + IRQ_MASK);
}
+static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, "%s", dev_name(&data->pdev->dev));
+}
+
+static const struct irq_chip ts4800_chip = {
+ .irq_mask = ts4800_irq_mask,
+ .irq_unmask = ts4800_irq_unmask,
+ .irq_print_chip = ts4800_irq_print_chip,
+};
+
static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct ts4800_irq_data *data = d->host_data;
- irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq);
irq_set_chip_data(irq, data);
irq_set_noprobe(irq);
@@ -92,13 +106,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct ts4800_irq_data *data;
- struct irq_chip *irq_chip;
int parent_irq;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->pdev = pdev;
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
@@ -111,11 +125,6 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq_chip = &data->irq_chip;
- irq_chip->name = dev_name(&pdev->dev);
- irq_chip->irq_mask = ts4800_irq_mask;
- irq_chip->irq_unmask = ts4800_irq_unmask;
-
data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index f2757b6aecc8..ba543ed9c154 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -7,12 +7,12 @@
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/seq_file.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -34,14 +34,12 @@
/**
* struct fpga_irq_data - irq data container for the FPGA IRQ controller
* @base: memory offset in virtual memory
- * @chip: chip container for this instance
* @domain: IRQ domain for this instance
* @valid: mask for valid IRQs on this controller
* @used_irqs: number of active IRQs on this controller
*/
struct fpga_irq_data {
void __iomem *base;
- struct irq_chip chip;
u32 valid;
struct irq_domain *domain;
u8 used_irqs;
@@ -67,6 +65,20 @@ static void fpga_irq_unmask(struct irq_data *d)
writel(mask, f->base + IRQ_ENABLE_SET);
}
+static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, irq_domain_get_of_node(f->domain)->name);
+}
+
+static const struct irq_chip fpga_chip = {
+ .irq_ack = fpga_irq_mask,
+ .irq_mask = fpga_irq_mask,
+ .irq_unmask = fpga_irq_unmask,
+ .irq_print_chip = fpga_irq_print_chip,
+};
+
static void fpga_irq_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -116,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
* Keep iterating over all registered FPGA IRQ controllers until there are
* no pending interrupts.
*/
-asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
{
int i, handled;
@@ -135,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
if (!(f->valid & BIT(hwirq)))
return -EPERM;
irq_set_chip_data(irq, f);
- irq_set_chip_and_handler(irq, &f->chip,
- handle_level_irq);
+ irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq);
irq_set_probe(irq);
return 0;
}
@@ -146,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
- int parent_irq, u32 valid, struct device_node *node)
+static void __init fpga_irq_init(void __iomem *base, int parent_irq,
+ u32 valid, struct device_node *node)
{
struct fpga_irq_data *f;
int i;
@@ -158,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
}
f = &fpga_irq_devices[fpga_irq_id];
f->base = base;
- f->chip.name = name;
- f->chip.irq_ack = fpga_irq_mask;
- f->chip.irq_mask = fpga_irq_mask;
- f->chip.irq_unmask = fpga_irq_unmask;
f->valid = valid;
if (parent_irq != -1) {
@@ -169,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
f);
}
- /* This will also allocate irq descriptors */
- f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
+ f->domain = irq_domain_add_linear(node, fls(valid),
&fpga_irqdomain_ops, f);
/* This will allocate all valid descriptors in the linear case */
for (i = 0; i < fls(valid); i++)
if (valid & BIT(i)) {
- if (!irq_start)
- irq_create_mapping(f->domain, i);
+ /* Is this still required? */
+ irq_create_mapping(f->domain, i);
f->used_irqs++;
}
pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
- fpga_irq_id, name, base, f->used_irqs);
+ fpga_irq_id, node->name, base, f->used_irqs);
if (parent_irq != -1)
pr_cont(", parent IRQ: %d\n", parent_irq);
else
@@ -192,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
}
#ifdef CONFIG_OF
-int __init fpga_irq_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init fpga_irq_of_init(struct device_node *node,
+ struct device_node *parent)
{
void __iomem *base;
u32 clear_mask;
@@ -222,7 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node,
parent_irq = -1;
}
- fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+ fpga_irq_init(base, parent_irq, valid_mask, node);
/*
* On Versatile AB/PB, some secondary interrupts have a direct
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 356a59755d63..238d3d344949 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -32,6 +32,8 @@
#define MER_ME (1<<0)
#define MER_HIE (1<<1)
+#define SPURIOUS_IRQ (-1U)
+
static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
struct xintc_irq_chip {
@@ -110,20 +112,6 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
-unsigned int xintc_get_irq(void)
-{
- unsigned int irq = -1;
- u32 hwirq;
-
- hwirq = xintc_read(primary_intc, IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(primary_intc->root_domain, hwirq);
-
- pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct xintc_irq_chip *irqc = d->host_data;
@@ -164,6 +152,19 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+ u32 hwirq;
+
+ do {
+ hwirq = xintc_read(primary_intc, IVR);
+ if (unlikely(hwirq == SPURIOUS_IRQ))
+ break;
+
+ generic_handle_domain_irq(primary_intc->root_domain, hwirq);
+ } while (true);
+}
+
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
@@ -233,6 +234,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
primary_intc = irqc;
irq_set_default_host(primary_intc->root_domain);
+ set_handle_irq(xil_intc_handle_irq);
}
return 0;
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 173e6520e06e..d96916cf6a41 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,23 +21,19 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define PDC_MAX_IRQS 168
#define PDC_MAX_GPIO_IRQS 256
-#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
-#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
-
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
-#define PDC_NO_PARENT_IRQ ~0UL
-
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
u32 cnt;
};
+#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base)
+
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
@@ -56,17 +52,18 @@ static u32 pdc_reg_read(int reg, u32 i)
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
+ unsigned long enable;
+ unsigned long flags;
u32 index, mask;
- u32 enable;
index = pin_out / 32;
mask = pin_out % 32;
- raw_spin_lock(&pdc_lock);
+ raw_spin_lock_irqsave(&pdc_lock, flags);
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
- enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
+ __assign_bit(mask, &enable, on);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
- raw_spin_unlock(&pdc_lock);
+ raw_spin_unlock_irqrestore(&pdc_lock, flags);
}
static void qcom_pdc_gic_disable(struct irq_data *d)
@@ -186,34 +183,17 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
};
-static irq_hw_number_t get_parent_hwirq(int pin)
+static struct pdc_pin_region *get_pin_region(int pin)
{
int i;
- struct pdc_pin_region *region;
for (i = 0; i < pdc_region_cnt; i++) {
- region = &pdc_region[i];
- if (pin >= region->pin_base &&
- pin < region->pin_base + region->cnt)
- return (region->parent_base + pin - region->pin_base);
+ if (pin >= pdc_region[i].pin_base &&
+ pin < pdc_region[i].pin_base + pdc_region[i].cnt)
+ return &pdc_region[i];
}
- return PDC_NO_PARENT_IRQ;
-}
-
-static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *hwirq, unsigned int *type)
-{
- if (is_of_node(fwspec->fwnode)) {
- if (fwspec->param_count != 2)
- return -EINVAL;
-
- *hwirq = fwspec->param[0];
- *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
- return 0;
- }
-
- return -EINVAL;
+ return NULL;
}
static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
@@ -221,55 +201,12 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
{
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq, parent_hwirq;
- unsigned int type;
- int ret;
-
- ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &qcom_pdc_gic_chip, NULL);
- if (ret)
- return ret;
-
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == PDC_NO_PARENT_IRQ)
- return irq_domain_disconnect_hierarchy(domain->parent, virq);
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- type = IRQ_TYPE_EDGE_RISING;
-
- if (type & IRQ_TYPE_LEVEL_MASK)
- type = IRQ_TYPE_LEVEL_HIGH;
-
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 3;
- parent_fwspec.param[0] = 0;
- parent_fwspec.param[1] = parent_hwirq;
- parent_fwspec.param[2] = type;
-
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
-}
-
-static const struct irq_domain_ops qcom_pdc_ops = {
- .translate = qcom_pdc_translate,
- .alloc = qcom_pdc_alloc,
- .free = irq_domain_free_irqs_common,
-};
-
-static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
-{
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq, parent_hwirq;
+ struct pdc_pin_region *region;
+ irq_hw_number_t hwirq;
unsigned int type;
int ret;
- ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -281,8 +218,8 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ region = get_pin_region(hwirq);
+ if (!region)
return irq_domain_disconnect_hierarchy(domain->parent, virq);
if (type & IRQ_TYPE_EDGE_BOTH)
@@ -294,23 +231,16 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0;
- parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[1] = pin_to_hwirq(region, hwirq);
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
&parent_fwspec);
}
-static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- return bus_token == DOMAIN_BUS_WAKEUP;
-}
-
-static const struct irq_domain_ops qcom_pdc_gpio_ops = {
- .select = qcom_pdc_gpio_domain_select,
- .alloc = qcom_pdc_gpio_alloc,
+static const struct irq_domain_ops qcom_pdc_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = qcom_pdc_alloc,
.free = irq_domain_free_irqs_common,
};
@@ -361,7 +291,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
+ struct irq_domain *parent_domain, *pdc_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -383,32 +313,21 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
- pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS,
- of_fwnode_handle(node),
- &qcom_pdc_ops, NULL);
- if (!pdc_domain) {
- pr_err("%pOF: GIC domain add failed\n", node);
- ret = -ENOMEM;
- goto fail;
- }
-
- pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+ pdc_domain = irq_domain_create_hierarchy(parent_domain,
IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
PDC_MAX_GPIO_IRQS,
of_fwnode_handle(node),
- &qcom_pdc_gpio_ops, NULL);
- if (!pdc_gpio_domain) {
- pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+ &qcom_pdc_ops, NULL);
+ if (!pdc_domain) {
+ pr_err("%pOF: PDC domain add failed\n", node);
ret = -ENOMEM;
- goto remove;
+ goto fail;
}
- irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+ irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP);
return 0;
-remove:
- irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
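The pdc_enable_intr() hunk above folds the old ENABLE_INTR()/CLEAR_INTR() macros into a single __assign_bit() call on the enable bank, and get_pin_region() plus pin_to_hwirq() replace the open-coded pin-to-parent translation. Below is a minimal user-space sketch of both ideas; the region numbers and the assign_bit32() helper are illustrative stand-ins, not the kernel API.

/*
 * User-space sketch (not kernel code): an __assign_bit()-style set/clear
 * by flag, plus pin_to_hwirq() translating a PDC pin through its region.
 * All values below are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct pdc_pin_region {
	unsigned int pin_base;
	unsigned int parent_base;
	unsigned int cnt;
};

#define pin_to_hwirq(r, p)	((r)->parent_base + (p) - (r)->pin_base)

/* replaces separate ENABLE_INTR()/CLEAR_INTR() macros */
static inline void assign_bit32(unsigned int nr, unsigned int *word, bool on)
{
	if (on)
		*word |= 1u << nr;
	else
		*word &= ~(1u << nr);
}

int main(void)
{
	struct pdc_pin_region region = { .pin_base = 32, .parent_base = 544, .cnt = 64 };
	unsigned int bank = 0;
	unsigned int pin = 37;

	/* index/mask split mirrors pin_out / 32 and pin_out % 32 above */
	assign_bit32(pin % 32, &bank, true);
	printf("enable bank: 0x%08x, parent hwirq for pin %u: %u\n",
	       bank, pin, (unsigned int)pin_to_hwirq(&region, pin));
	assign_bit32(pin % 32, &bank, false);
	printf("bank after disable: 0x%08x\n", bank);
	return 0;
}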
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index bd087cca1c1d..af17459c1a5c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc)
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
- dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+ if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
+ printk(KERN_WARNING
+ "HFC-PCI: No usable DMA configuration!\n");
+ return -EIO;
+ }
buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
GFP_KERNEL);
/* We silently assume the address is okay if nonzero */
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index e11ca6bbc7f4..c3b2c99b5cd5 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
{
int found = 0;
- char *dup, *tok, *name, *args;
+ char *dup, *next, *tok, *name, *args;
struct dsp_element_entry *entry, *n;
struct dsp_pipeline_entry *pipeline_entry;
struct mISDN_dsp_element *elem;
@@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
if (!list_empty(&pipeline->list))
_dsp_pipeline_destroy(pipeline);
- dup = kstrdup(cfg, GFP_ATOMIC);
+ dup = next = kstrdup(cfg, GFP_ATOMIC);
if (!dup)
return 0;
- while ((tok = strsep(&dup, "|"))) {
+ while ((tok = strsep(&next, "|"))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "(");
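The dsp_pipeline_build() hunk keeps the kstrdup() result in dup and lets strsep() advance a separate next cursor, so the original allocation can still be released once parsing is done (strsep() walks the pointer it is handed, leaving it NULL at the end). A minimal user-space sketch of that ownership pattern follows; the names parse_pipeline and cfg are illustrative only.

/*
 * strsep() advances the cursor it is given, so the original strdup()
 * pointer must be kept separately for the final free().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_pipeline(const char *cfg)
{
	char *dup, *next, *tok;

	dup = next = strdup(cfg);	/* keep 'dup' for free(), walk 'next' */
	if (!dup)
		return;

	while ((tok = strsep(&next, "|"))) {
		if (!*tok)
			continue;
		printf("element: %s\n", tok);
	}

	free(dup);	/* freeing 'next' here would free NULL and leak the buffer */
}

int main(void)
{
	parse_pipeline("hwec(deftaps=128)|dtmf|");
	return 0;
}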
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index d03cfd3c0bfb..c87686bd7c18 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -266,14 +266,12 @@ static int cr0014114_probe(struct spi_device *spi)
return 0;
}
-static int cr0014114_remove(struct spi_device *spi)
+static void cr0014114_remove(struct spi_device *spi)
{
struct cr0014114 *priv = spi_get_drvdata(spi);
cancel_delayed_work_sync(&priv->work);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct of_device_id cr0014114_dt_ids[] = {
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 20dc9b9d7dea..cf5fb1195f87 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -85,15 +85,13 @@ eledcr:
return ret;
}
-static int dac124s085_remove(struct spi_device *spi)
+static void dac124s085_remove(struct spi_device *spi)
{
struct dac124s085 *dac = spi_get_drvdata(spi);
int i;
for (i = 0; i < ARRAY_SIZE(dac->leds); i++)
led_classdev_unregister(&dac->leds[i].ldev);
-
- return 0;
}
static struct spi_driver dac124s085_driver = {
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
index f9eb59a25570..7e7b617bcd56 100644
--- a/drivers/leds/leds-el15203000.c
+++ b/drivers/leds/leds-el15203000.c
@@ -315,13 +315,11 @@ static int el15203000_probe(struct spi_device *spi)
return el15203000_probe_dt(priv);
}
-static int el15203000_remove(struct spi_device *spi)
+static void el15203000_remove(struct spi_device *spi)
{
struct el15203000 *priv = spi_get_drvdata(spi);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct of_device_id el15203000_dt_ids[] = {
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index f1964c96fb15..2bc5c99daf51 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -130,13 +130,11 @@ static int spi_byte_probe(struct spi_device *spi)
return 0;
}
-static int spi_byte_remove(struct spi_device *spi)
+static void spi_byte_remove(struct spi_device *spi)
{
struct spi_byte_led *led = spi_get_drvdata(spi);
mutex_destroy(&led->mutex);
-
- return 0;
}
static struct spi_driver spi_byte_driver = {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b5ea378e66cb..998a5cfdbc4e 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -204,6 +204,7 @@ config BLK_DEV_DM
tristate "Device mapper support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select BLK_DEV_DM_BUILTIN
+ select BLK_MQ_STACKING
depends on DAX || DAX=n
help
Device-mapper is a low level volume manager. It works by allowing
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 88c573eeb598..ad9f16689419 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&check_state->started) == 0);
for (i = 0; i < check_state->total_threads; i++) {
if (check_state->infos[i].result) {
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9c6f9ec55b72..020712c5203f 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 61bd79babf7a..7c2ca52ca3e4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset;
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl);
@@ -611,11 +609,9 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, bio->bi_inline_vecs, 1);
- bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
@@ -773,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
+ bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
- bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
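The journal.c hunks above, and most of the md/dm hunks that follow, convert to the block-layer interface in which bio_init()/bio_reset()/bio_alloc() take the target block device and the operation flags up front, instead of separate bio_set_dev() and bio_set_op_attrs() calls after allocation. A simplified user-space model of that "fully construct at allocation" pattern is sketched below; the mock types and flag values are stand-ins, not kernel structures.

/*
 * Simplified model: moving the device and op flags into the allocator
 * means a bio can no longer leave the constructor half-initialised.
 */
#include <stdio.h>
#include <stdlib.h>

struct mock_bdev { const char *name; };

struct mock_bio {
	struct mock_bdev *bdev;		/* was set later via bio_set_dev() */
	unsigned int opf;		/* was set later via bio_set_op_attrs() */
};

#define MOCK_OP_WRITE	1u
#define MOCK_PREFLUSH	(1u << 8)

static struct mock_bio *mock_bio_alloc(struct mock_bdev *bdev, unsigned int opf)
{
	struct mock_bio *bio = calloc(1, sizeof(*bio));

	if (bio) {
		bio->bdev = bdev;
		bio->opf = opf;
	}
	return bio;
}

int main(void)
{
	struct mock_bdev dev = { .name = "journal-dev" };
	struct mock_bio *bio = mock_bio_alloc(&dev, MOCK_OP_WRITE | MOCK_PREFLUSH);

	if (bio)
		printf("%s: opf=0x%x\n", bio->bdev->name, bio->opf);
	free(bio);
	return 0;
}

The clone paths in the later hunks follow the same motivation: bio_clone_fast() plus a separate bio_copy_dev()/bio_set_dev() collapse into a single bio_alloc_clone() or bio_init_clone() call.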
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b9c3d27ec093..99499d1f6e66 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d15aae6c51c1..fdd0194f84dd 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -44,10 +44,10 @@ static void bio_csum(struct bio *bio, struct bkey *k)
uint64_t csum = 0;
bio_for_each_segment(bv, bio, iter) {
- void *d = kmap(bv.bv_page) + bv.bv_offset;
+ void *d = bvec_kmap_local(&bv);
csum = crc64_be(csum, d, bv.bv_len);
- kunmap(bv.bv_page);
+ kunmap_local(d);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -685,8 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
- __bio_clone_fast(bio, orig_bio);
+ bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
@@ -831,11 +830,11 @@ static void cached_dev_read_done(struct closure *cl)
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -913,14 +912,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
@@ -1025,21 +1023,21 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
- &dc->disk.bio_split);
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
- flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
- s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+ s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &dc->disk.bio_split);
/* I/O request sent to backing device */
bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 140f35dc0c45..bf3de149d3c9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -18,7 +18,6 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
@@ -343,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_init(bio, dc->sb_bv, 1);
- bio_set_dev(bio, dc->bdev);
+ bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -387,8 +385,7 @@ void bcache_write_super(struct cache_set *c)
if (ca->sb.version < version)
ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
+ bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -2240,7 +2237,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+ bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
/*
* when ca->sb.njournal_buckets is not zero, journal exists,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c7560f66dca8..9ee0005874cd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
@@ -585,10 +585,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
sectors_dirty = atomic_add_return(s,
d->stripe_sectors_dirty + stripe);
- if (sectors_dirty == d->stripe_size)
- set_bit(stripe, d->full_dirty_stripes);
- else
- clear_bit(stripe, d->full_dirty_stripes);
+ if (sectors_dirty == d->stripe_size) {
+ if (!test_bit(stripe, d->full_dirty_stripes))
+ set_bit(stripe, d->full_dirty_stripes);
+ } else {
+ if (test_bit(stripe, d->full_dirty_stripes))
+ clear_bit(stripe, d->full_dirty_stripes);
+ }
nr_sectors -= s;
stripe_offset = 0;
@@ -998,9 +1001,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&state->started) == 0);
out:
kfree(state);
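The full_dirty_stripes hunk above only calls set_bit()/clear_bit() when the bit actually needs to change, keeping the common case a plain read rather than an atomic read-modify-write of a shared cache line. A small sketch of that test-before-write pattern, using plain 64-bit words instead of the kernel's atomic bitops:

/*
 * Skip the write when the bit already holds the desired value; only the
 * slow path dirties the bitmap word.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

static bool test_bit64(unsigned int nr, const uint64_t *map)
{
	return (map[nr / BITS_PER_WORD] >> (nr % BITS_PER_WORD)) & 1;
}

static void set_bit_if_clear(unsigned int nr, uint64_t *map)
{
	if (!test_bit64(nr, map))	/* read-only fast path */
		map[nr / BITS_PER_WORD] |= 1ULL << (nr % BITS_PER_WORD);
}

static void clear_bit_if_set(unsigned int nr, uint64_t *map)
{
	if (test_bit64(nr, map))
		map[nr / BITS_PER_WORD] &= ~(1ULL << (nr % BITS_PER_WORD));
}

int main(void)
{
	uint64_t full_dirty_stripes[4] = { 0 };

	set_bit_if_clear(70, full_dirty_stripes);
	set_bit_if_clear(70, full_dirty_stripes);	/* second call is a no-op read */
	printf("word1 = 0x%016llx\n", (unsigned long long)full_dirty_stripes[1]);
	clear_bit_if_set(70, full_dirty_stripes);
	printf("word1 = 0x%016llx\n", (unsigned long long)full_dirty_stripes[1]);
	return 0;
}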
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 447d030036d1..89fdfb49d564 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -744,21 +744,14 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_unlock_irq(&cache->lock);
}
-static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, bool bio_has_pbd)
-{
- if (bio_has_pbd)
- check_if_tick_bio_needed(cache, bio);
- remap_to_origin(cache, bio);
- if (bio_data_dir(bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
-}
-
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
dm_oblock_t oblock)
{
// FIXME: check_if_tick_bio_needed() is called way too much through this interface
- __remap_to_origin_clear_discard(cache, bio, oblock, true);
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
}
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -826,16 +819,15 @@ static void issue_op(struct bio *bio, void *context)
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
+ struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
+ GFP_NOIO, &cache->bs);
BUG_ON(!origin_bio);
bio_chain(origin_bio, bio);
- /*
- * Passing false to __remap_to_origin_clear_discard() skips
- * all code that might use per_bio_data (since clone doesn't have it)
- */
- __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+
+ if (bio_data_dir(origin_bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
submit_bio(origin_bio);
remap_to_cache(cache, bio, cblock);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index b855fef4f38a..72d18c3fbf1f 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -11,7 +11,6 @@
#include <linux/kthread.h>
#include <linux/ktime.h>
-#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d4ae31558826..e2b0af4a2ee8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -234,7 +234,7 @@ static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
-static void clone_init(struct dm_crypt_io *, struct bio *);
+static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
@@ -1364,11 +1364,10 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
}
if (r == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
}
@@ -1672,11 +1671,10 @@ retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
- if (!clone)
- goto out;
-
- clone_init(io, clone);
+ clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
+ GFP_NOIO, &cc->bs);
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
remaining_size = size;
@@ -1702,7 +1700,7 @@ retry:
bio_put(clone);
clone = NULL;
}
-out:
+
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock);
@@ -1829,34 +1827,25 @@ static void crypt_endio(struct bio *clone)
crypt_dec_pending(io);
}
-static void clone_init(struct dm_crypt_io *io, struct bio *clone)
-{
- struct crypt_config *cc = io->cc;
-
- clone->bi_private = io;
- clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, cc->dev->bdev);
- clone->bi_opf = io->base_bio->bi_opf;
-}
-
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
/*
- * We need the original biovec array in order to decrypt
- * the whole bio data *afterwards* -- thanks to immutable
- * biovecs we don't need to worry about the block layer
- * modifying the biovec array; so leverage bio_clone_fast().
+ * We need the original biovec array in order to decrypt the whole bio
+ * data *afterwards* -- thanks to immutable biovecs we don't need to
+ * worry about the block layer modifying the biovec array; so leverage
+ * bio_alloc_clone().
*/
- clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
+ clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
crypt_inc_pending(io);
- clone_init(io, clone);
clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
@@ -2179,11 +2168,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
io->error = BLK_STS_PROTECTION;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index eb4b5e52bd6f..c58a5111cb57 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1788,12 +1788,11 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- char b[BDEVNAME_SIZE];
sector_t s;
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
- bio_devname(bio, b), s);
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, s);
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2d3cda0acacb..23e038f8dc84 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -345,11 +345,10 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
+ GFP_NOIO, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
- bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 139b09b06eda..c9d036d6bb2e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -217,18 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
void *ptr;
size_t ret;
- bio = bio_alloc(GFP_KERNEL, 1);
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -275,18 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_pages);
- if (!bio) {
- DMERR("Couldn't alloc inline data bio");
- goto error;
- }
-
+ bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
+ GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < bio_pages; i++) {
pg_datalen = min_t(int, datalen, PAGE_SIZE);
@@ -322,7 +310,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
error_bio:
bio_free_pages(bio);
bio_put(bio);
-error:
put_io_block(lc);
return -1;
}
@@ -363,17 +350,12 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -385,18 +367,13 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL,
- bio_max_segs(block->vec_cnt - i));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev,
+ bio_max_segs(block->vec_cnt - i),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 579ab6183d4d..6948d5db9092 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -303,21 +303,6 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- blk_status_t r;
-
- if (blk_queue_io_stat(clone->q))
- clone->rq_flags |= RQF_IO_STAT;
-
- clone->start_time_ns = ktime_get_ns();
- r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
- return r;
-}
-
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
@@ -398,13 +383,20 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ ret = blk_insert_cloned_request(clone);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
+ default:
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, ret);
}
break;
case DM_MAPIO_REQUEUE:
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index dcf34c6b05ad..0d336b5ec571 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,11 +141,6 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
-
- /*
- * Flush data after merge.
- */
- struct bio flush_bio;
};
/*
@@ -1127,17 +1122,6 @@ shut:
static void error_bios(struct bio *bio);
-static int flush_data(struct dm_snapshot *s)
-{
- struct bio *flush_bio = &s->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, s->origin->bdev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- return submit_bio_wait(flush_bio);
-}
-
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1151,7 +1135,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
- if (flush_data(s) < 0) {
+ if (blkdev_issue_flush(s->origin->bdev) < 0) {
DMERR("Flush after merge failed: shutting down merge");
goto shut;
}
@@ -1340,7 +1324,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
- bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1528,8 +1511,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- bio_uninit(&s->flush_bio);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ec119d2422d5..f4234d615aa1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -282,8 +282,6 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
-
- struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -1179,25 +1177,17 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
- discard_parent = bio_alloc(GFP_NOIO, 1);
- if (!discard_parent) {
- DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
- dm_device_name(tc->pool->pool_md));
- queue_passdown_pt2(m);
-
- } else {
- discard_parent->bi_end_io = passdown_endio;
- discard_parent->bi_private = m;
-
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
- struct discard_op op;
-
- begin_discard(&op, tc, discard_parent);
- r = issue_discard(&op, m->data_block, data_end);
- end_discard(&op, r);
- }
+ discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
+
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
}
}
@@ -2913,7 +2903,6 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
- bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2994,7 +2983,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
- bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3201,13 +3189,8 @@ static void metadata_low_callback(void *context)
static int metadata_pre_commit_callback(void *context)
{
struct pool *pool = context;
- struct bio *flush_bio = &pool->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, pool->data_dev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return submit_bio_wait(flush_bio);
+ return blkdev_issue_flush(pool->data_dev);
}
static sector_t get_dev_size(struct block_device *bdev)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f31591d2d25..5630b470ba42 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
max_pages = e->wc_list_contiguous;
- bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
+ bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
+ GFP_NOIO, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
- bio_set_dev(bio, wc->dev->bdev);
bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
@@ -1852,7 +1852,8 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ if (WC_MODE_FUA(wc))
+ bio->bi_opf |= REQ_FUA;
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index ee4626d08557..e5f1eb27ce2e 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -550,11 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
if (!mblk)
return ERR_PTR(-ENOMEM);
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- dmz_free_mblock(zmd, mblk);
- return ERR_PTR(-ENOMEM);
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
+ GFP_NOIO);
spin_lock(&zmd->mblk_lock);
@@ -578,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -725,19 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- set_bit(DMZ_META_ERROR, &mblk->state);
- return -ENOMEM;
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
+ GFP_NOIO);
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -759,13 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio)
- return -ENOMEM;
-
+ bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOIO);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
- bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 166c4e9d99c9..a3f6d3ef3817 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (dev->flags & DMZ_BDEV_DYING)
return -EIO;
- clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+ clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dev->bdev);
bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c0ae8087c602..183ce0d6728f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -79,10 +79,14 @@ struct clone_info {
#define DM_IO_BIO_OFFSET \
(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
+static inline struct dm_target_io *clone_to_tio(struct bio *clone)
+{
+ return container_of(clone, struct dm_target_io, clone);
+}
+
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- if (!tio->inside_dm_io)
+ if (!clone_to_tio(bio)->inside_dm_io)
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
@@ -477,10 +481,7 @@ out:
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct dm_io *io = tio->io;
-
- return jiffies_to_nsecs(io->start_time);
+ return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
@@ -489,7 +490,7 @@ static void start_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- io->start_time = bio_start_io_acct(bio);
+ bio_start_io_acct_time(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -519,11 +520,9 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
- if (!clone)
- return NULL;
+ clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ tio = clone_to_tio(clone);
tio->inside_dm_io = true;
tio->io = NULL;
@@ -535,7 +534,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
io->md = md;
spin_lock_init(&io->endio_lock);
- start_io_acct(io);
+ io->start_time = jiffies;
return io;
}
@@ -545,8 +544,8 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
bio_put(&io->tio.clone);
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
- unsigned target_bio_nr, gfp_t gfp_mask)
+static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
struct dm_target_io *tio;
@@ -554,11 +553,12 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
+ struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+ gfp_mask, &ci->io->md->bs);
if (!clone)
return NULL;
- tio = container_of(clone, struct dm_target_io, clone);
+ tio = clone_to_tio(clone);
tio->inside_dm_io = false;
}
@@ -566,15 +566,16 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
tio->io = ci->io;
tio->ti = ti;
tio->target_bio_nr = target_bio_nr;
+ tio->len_ptr = len;
- return tio;
+ return &tio->clone;
}
-static void free_tio(struct dm_target_io *tio)
+static void free_tio(struct bio *clone)
{
- if (tio->inside_dm_io)
+ if (clone_to_tio(clone)->inside_dm_io)
return;
- bio_put(&tio->clone);
+ bio_put(clone);
}
/*
@@ -879,7 +880,7 @@ static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -930,7 +931,7 @@ static void clone_endio(struct bio *bio)
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(bio);
dm_io_dec_pending(io, error);
}
@@ -1085,7 +1086,7 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
*/
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
@@ -1115,11 +1116,11 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static void __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct bio *clone)
{
+ struct dm_target_io *tio = clone_to_tio(clone);
int r;
sector_t sector;
- struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
@@ -1164,7 +1165,7 @@ static void __map_bio(struct dm_target_io *tio)
struct mapped_device *md = io->md;
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(clone);
dm_io_dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
@@ -1172,7 +1173,7 @@ static void __map_bio(struct dm_target_io *tio)
struct mapped_device *md = io->md;
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(clone);
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
@@ -1190,106 +1191,75 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
/*
* Creates a bio that consists of range of complete bvecs.
*/
-static int clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned len)
+static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+ sector_t sector, unsigned *len)
{
- struct bio *clone = &tio->clone;
- int r;
-
- __bio_clone_fast(clone, bio);
-
- r = bio_crypt_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
-
- if (bio_integrity(bio)) {
- if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
- !dm_target_passes_integrity(tio->ti->type))) {
- DMWARN("%s: the target %s doesn't support integrity data.",
- dm_device_name(tio->io->md),
- tio->ti->type->name);
- return -EIO;
- }
-
- r = bio_integrity_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
- }
+ struct bio *bio = ci->bio, *clone;
+ clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
- clone->bi_iter.bi_size = to_bytes(len);
+ clone->bi_iter.bi_size = to_bytes(*len);
if (bio_integrity(bio))
bio_integrity_trim(clone);
+ __map_bio(clone);
return 0;
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios)
+ struct dm_target *ti, unsigned num_bios,
+ unsigned *len)
{
- struct dm_target_io *tio;
+ struct bio *bio;
int try;
- if (!num_bios)
- return;
-
- if (num_bios == 1) {
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- bio_list_add(blist, &tio->clone);
- return;
- }
-
for (try = 0; try < 2; try++) {
int bio_nr;
- struct bio *bio;
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
- if (!tio)
+ bio = alloc_tio(ci, ti, bio_nr, len,
+ try ? GFP_NOIO : GFP_NOWAIT);
+ if (!bio)
break;
- bio_list_add(blist, &tio->clone);
+ bio_list_add(blist, bio);
}
if (try)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
- while ((bio = bio_list_pop(blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- free_tio(tio);
- }
+ while ((bio = bio_list_pop(blist)))
+ free_tio(bio);
}
}
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target_io *tio, unsigned *len)
-{
- struct bio *clone = &tio->clone;
-
- tio->len_ptr = len;
-
- __bio_clone_fast(clone, ci->bio);
- if (len)
- bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
-}
-
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
- struct bio *bio;
- struct dm_target_io *tio;
-
- alloc_multiple_bios(&blist, ci, ti, num_bios);
+ struct bio *clone;
- while ((bio = bio_list_pop(&blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- __clone_and_map_simple_bio(ci, tio, len);
+ switch (num_bios) {
+ case 0:
+ break;
+ case 1:
+ clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
+ if (len)
+ bio_setup_sector(clone, ci->sector, *len);
+ __map_bio(clone);
+ break;
+ default:
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ while ((clone = bio_list_pop(&blist))) {
+ if (len)
+ bio_setup_sector(clone, ci->sector, *len);
+ __map_bio(clone);
+ }
+ break;
}
}
@@ -1304,9 +1274,8 @@ static int __send_empty_flush(struct clone_info *ci)
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- bio_set_dev(&flush_bio, ci->io->md->disk->part0);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
ci->bio = &flush_bio;
ci->sector_count = 0;
@@ -1319,25 +1288,6 @@ static int __send_empty_flush(struct clone_info *ci)
return 0;
}
-static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
-{
- struct bio *bio = ci->bio;
- struct dm_target_io *tio;
- int r;
-
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- return r;
- }
- __map_bio(tio);
-
- return 0;
-}
-
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios)
{
@@ -1442,9 +1392,6 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
ci->sector = bio->bi_iter.bi_sector;
}
-#define __dm_part_stat_sub(part, field, subnd) \
- (part_stat_get(part, field) -= (subnd))
-
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1480,23 +1427,12 @@ static void __split_and_process_bio(struct mapped_device *md,
GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
}
}
+ start_io_acct(ci.io);
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
@@ -2091,7 +2027,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(md->queue);
+ blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index c0dc6f2ef4a3..50ad818978a4 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
- bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
b->bi_end_io = faulty_fail;
bio = b;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index e7d6486f090f..3081a936350d 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -121,11 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio, NULL, 0);
- __bio_clone_fast(&mp_bh->bio, bio);
+ bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
@@ -299,7 +297,6 @@ static void multipathd(struct md_thread *thread)
md_check_recovery(mddev);
for (;;) {
- char b[BDEVNAME_SIZE];
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head))
break;
@@ -311,13 +308,13 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else {
- pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: redirecting sector %llu to another IO path\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5881d05a76eb..309b3af906ad 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
+ bi = bio_alloc_bioset(rdev->bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bio_set_dev(bi, rdev->bdev);
- bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* If an error occurred, call md_error
*/
struct bio *bio;
- int ff = 0;
if (!page)
return;
@@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+ 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+ GFP_NOIO, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
- bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
test_bit(FailFast, &rdev->flags) &&
!test_bit(LastDev, &rdev->flags))
- ff = MD_FAILFAST;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
+ bio->bi_opf |= MD_FAILFAST;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
@@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio bio;
struct bio_vec bvec;
- bio_init(&bio, &bvec, 1);
-
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(&bio, rdev->meta_bdev);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
else
- bio_set_dev(&bio, rdev->bdev);
- bio.bi_opf = op | op_flags;
+ bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
@@ -5869,10 +5867,6 @@ int md_run(struct mddev *mddev)
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
- /* Set the NOWAIT flags if all underlying devices support it */
- if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
@@ -6010,6 +6004,10 @@ int md_run(struct mddev *mddev)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -8636,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio)
*/
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
+ struct block_device *bdev = (*bio)->bi_bdev;
struct md_io_acct *md_io_acct;
struct bio *clone;
- if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+ if (!blk_queue_io_stat(bdev->bd_disk->queue))
return;
- clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+ clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
md_io_acct->orig_bio = *bio;
md_io_acct->start_time = bio_start_io_acct(*bio);
@@ -9583,7 +9582,7 @@ static int md_notify_reboot(struct notifier_block *this,
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
- mdelay(1000*1);
+ msleep(1000);
return NOTIFY_DONE;
}
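
The md.c hunks above all follow the new bio allocation convention introduced in this series: the target device and the operation flags are passed to bio_alloc_bioset() up front, so the separate bio_set_dev() and bi_opf assignments disappear. A condensed sketch of the new calling convention (assuming an initialised bio_set; the helper name is made up):

#include <linux/bio.h>

/* bio_alloc_bioset(bdev, nr_vecs, opf, gfp, bio_set) */
static struct bio *alloc_flush_bio(struct block_device *bdev,
				   struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(bdev, 0,
					   REQ_OP_WRITE | REQ_PREFLUSH,
					   GFP_NOIO, bs);

	/* caller sets bi_end_io / bi_private and submits as before */
	return bio;
}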
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 83f9a4f3d82e..e61f6cad4e08 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -28,6 +28,11 @@ struct resync_pages {
struct page *pages[RESYNC_PAGES];
};
+struct raid1_plug_cb {
+ struct blk_plug_cb cb;
+ struct bio_list pending;
+};
+
static void rbio_pool_free(void *rbio, void *data)
{
kfree(rbio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2d8acb1e988..934186724d21 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1126,7 +1125,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
+ behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1166,12 +1166,6 @@ free_pages:
bio_put(behind_bio);
}
-struct raid1_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -1183,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1319,13 +1312,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r1_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+ &mddev->bio_set);
r1_bio->bios[rdisk] = read_bio;
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
- bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1545,24 +1538,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO, &mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-
if (r1_bio->behind_master_bio) {
+ mbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
if (test_bit(CollisionCheck, &rdev->flags))
wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- } else if (mddev->serialize_policy)
- wait_for_serialization(rdev, r1_bio);
+ } else {
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
+
+ if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
+ }
r1_bio->bios[i] = mbio;
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &rdev->flags) &&
@@ -1586,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -2070,15 +2062,14 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
} while (!success && d != r1_bio->read_disk);
if (!success) {
- char b[BDEVNAME_SIZE];
int abort = 0;
/* Cannot read from anywhere, this block is lost.
* Record a bad block on each device. If that doesn't
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
- mdname(mddev), bio_devname(bio, b),
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), bio->bi_bdev,
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
@@ -2165,11 +2156,10 @@ static void process_checks(struct r1bio *r1_bio)
continue;
/* fixup the bio for reuse, but preserve errno */
status = b->bi_status;
- bio_reset(b);
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
@@ -2416,12 +2406,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
/* Write at 'sector' for 'sectors'*/
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- wbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
} else {
- wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+ GFP_NOIO, &mddev->bio_set);
}
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2430,7 +2420,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
- bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0)
/* failure! */
@@ -2650,7 +2639,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rps;
}
r1bio->master_bio = NULL;
@@ -3058,7 +3047,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index ccf10e59b116..ebb6788820e7 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -87,7 +87,6 @@ struct r1conf {
/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
- int pending_count;
/* for use when syncing mirrors:
* We don't allow both normal IO and resync/recovery IO at
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2b969f70a31f..b369ebb965a9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
return rdev->new_data_offset;
}
-struct raid10_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
- cb);
+ struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
struct mddev *mddev = plug->cb.data;
struct r10conf *conf = mddev->private;
struct bio *bio;
@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1208,14 +1199,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
@@ -1239,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct blk_plug_cb *cb;
- struct raid10_plug_cb *plug = NULL;
+ struct raid1_plug_cb *plug = NULL;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
int devnum = r10_bio->devs[n_copy].devnum;
@@ -1255,7 +1245,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
} else
rdev = conf->mirrors[devnum].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
if (replacement)
r10_bio->devs[n_copy].repl_bio = mbio;
else
@@ -1263,7 +1253,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast,
@@ -1282,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
+ plug = container_of(cb, struct raid1_plug_cb, cb);
else
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -1812,7 +1799,8 @@ retry_discard:
*/
if (r10_bio->devs[disk].bio) {
struct md_rdev *rdev = conf->mirrors[disk].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
mbio->bi_end_io = raid10_end_discard_request;
mbio->bi_private = r10_bio;
r10_bio->devs[disk].bio = mbio;
@@ -1825,7 +1813,8 @@ retry_discard:
}
if (r10_bio->devs[disk].repl_bio) {
struct md_rdev *rrdev = conf->mirrors[disk].replacement;
- rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
rbio->bi_end_io = raid10_end_discard_request;
rbio->bi_private = r10_bio;
r10_bio->devs[disk].repl_bio = rbio;
@@ -2422,7 +2411,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* bi_vecs, as the read request might have corrupted these
*/
rp = get_resync_pages(tbio);
- bio_reset(tbio);
+ bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
@@ -2430,7 +2419,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
- bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2441,7 +2429,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio);
}
@@ -2894,12 +2881,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
- wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
@@ -3160,12 +3147,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio;
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio;
if (bio) {
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
}
}
@@ -4892,14 +4879,12 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
-
- bio_set_dev(read_bio, rdev->bdev);
+ read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
+ GFP_KERNEL, &mddev->bio_set);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
- bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index c34bb196790e..5c0804d8bb1f 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -75,7 +75,6 @@ struct r10conf {
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- int pending_count;
spinlock_t resync_lock;
atomic_t nr_pending;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b5dcaabbc15..a7d50ff9020a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log)
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs);
+ struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE, GFP_NOIO, &log->bs);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -1267,6 +1266,8 @@ static void r5l_log_flush_endio(struct bio *bio)
r5l_io_run_stripes(io);
list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ bio_uninit(bio);
}
/*
@@ -1302,10 +1303,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
- bio_reset(&log->flush_bio);
- bio_set_dev(&log->flush_bio, log->rdev->bdev);
+ bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -1623,10 +1623,10 @@ struct r5l_recovery_ctx {
* just copy data from the pool.
*/
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+ struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
sector_t pool_offset; /* offset of first page in the pool */
int total_pages; /* total allocated pages */
int valid_pages; /* pages with valid data */
- struct bio *ra_bio; /* bio to do the read ahead */
};
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1634,10 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs);
- if (!ctx->ra_bio)
- return -ENOMEM;
-
ctx->valid_pages = 0;
ctx->total_pages = 0;
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1649,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
ctx->total_pages += 1;
}
- if (ctx->total_pages == 0) {
- bio_put(ctx->ra_bio);
+ if (ctx->total_pages == 0)
return -ENOMEM;
- }
ctx->pool_offset = 0;
return 0;
@@ -1665,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
for (i = 0; i < ctx->total_pages; ++i)
put_page(ctx->ra_pool[i]);
- bio_put(ctx->ra_bio);
}
/*
@@ -1678,17 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
- bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
- ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+ struct bio bio;
+ int ret;
+
+ bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+ R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+ bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
ctx->pool_offset = offset;
while (ctx->valid_pages < ctx->total_pages) {
- bio_add_page(ctx->ra_bio,
- ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+ __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+ 0);
ctx->valid_pages += 1;
offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1697,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
break;
}
- return submit_bio_wait(ctx->ra_bio);
+ ret = submit_bio_wait(&bio);
+ bio_uninit(&bio);
+ return ret;
}
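
The recovery read-ahead above now uses a short-lived on-stack bio instead of keeping one allocated in the context: the lifecycle is bio_init() with device and opf, add pages, submit_bio_wait(), then bio_uninit(). A minimal stand-alone version of that pattern (single page, illustrative names):

#include <linux/bio.h>

static int read_page_sync(struct block_device *bdev, struct page *page,
			  sector_t sector)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);	/* release anything bio_init() set up */
	return ret;
}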
/*
@@ -3108,7 +3105,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 4ab417915d7f..ea4cd8dd4dc3 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -250,7 +250,8 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
atomic_set(&io->pending_flushes, 0);
- bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+ bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
+ REQ_OP_WRITE | REQ_FUA);
pplhdr = page_address(io->header_page);
clear_page(pplhdr);
@@ -416,12 +417,10 @@ static void ppl_log_endio(struct bio *bio)
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
- char b[BDEVNAME_SIZE];
-
- pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
+ pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
__func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector,
- bio_devname(bio, b));
+ bio->bi_bdev);
submit_bio(bio);
}
@@ -465,8 +464,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio;
- bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
bio->bi_write_hint = ppl_conf->write_hint;
@@ -496,11 +493,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
struct bio *prev = bio;
- bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
+ bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
+ prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
- bio->bi_opf = prev->bi_opf;
bio->bi_write_hint = prev->bi_write_hint;
- bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -590,9 +586,8 @@ static void ppl_flush_endio(struct bio *bio)
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
struct r5conf *conf = ppl_conf->mddev->private;
- char b[BDEVNAME_SIZE];
- pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+ pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);
if (bio->bi_status) {
struct md_rdev *rdev;
@@ -635,16 +630,14 @@ static void ppl_do_flush(struct ppl_io_unit *io)
if (bdev) {
struct bio *bio;
- char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio->bi_end_io = ppl_flush_endio;
- pr_debug("%s: dev: %s\n", __func__,
- bio_devname(bio, b));
+ pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev);
submit_bio(bio);
flushed_disks++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ffe720c73b0a..8bd5f06390ea 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1060,6 +1060,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int i, disks = sh->disks;
struct stripe_head *head_sh = sh;
struct bio_list pending_bios = BIO_EMPTY_LIST;
+ struct r5dev *dev;
bool should_defer;
might_sleep();
@@ -1094,8 +1095,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
op_flags |= REQ_SYNC;
again:
- bi = &sh->dev[i].req;
- rbi = &sh->dev[i].rreq; /* For writing to replacement */
+ dev = &sh->dev[i];
+ bi = &dev->req;
+ rbi = &dev->rreq; /* For writing to replacement */
rcu_read_lock();
rrdev = rcu_dereference(conf->disks[i].replacement);
@@ -1171,8 +1173,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(bi, rdev->bdev);
- bio_set_op_attrs(bi, op, op_flags);
+ bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
@@ -1238,8 +1239,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(rbi, rrdev->bdev);
- bio_set_op_attrs(rbi, op, op_flags);
+ bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
@@ -2294,7 +2294,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
int disks, struct r5conf *conf)
{
struct stripe_head *sh;
- int i;
sh = kmem_cache_zalloc(sc, gfp);
if (sh) {
@@ -2307,12 +2306,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
atomic_set(&sh->count, 1);
sh->raid_conf = conf;
sh->log_start = MaxSector;
- for (i = 0; i < disks; i++) {
- struct r5dev *dev = &sh->dev[i];
-
- bio_init(&dev->req, &dev->vec, 1);
- bio_init(&dev->rreq, &dev->rvec, 1);
- }
if (raid5_has_ppl(conf)) {
sh->ppl_page = alloc_page(gfp);
@@ -2677,7 +2670,6 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2785,7 +2777,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
- bio_reset(bi);
+ bio_uninit(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2823,7 +2815,6 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2860,7 +2851,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
- bio_reset(bi);
+ bio_uninit(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -5438,14 +5429,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
return 0;
}
- align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+ align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
+ &mddev->io_acct_set);
md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
raid_bio->bi_next = (void *)rdev;
if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
md_io_acct->start_time = bio_start_io_acct(raid_bio);
md_io_acct->orig_bio = raid_bio;
- bio_set_dev(align_bio, rdev->bdev);
align_bio->bi_end_io = raid5_align_endio;
align_bio->bi_private = md_io_acct;
align_bio->bi_iter.bi_sector = sector;
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 6f2a66bc87fb..6be4e5528879 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -625,7 +625,7 @@ fail_regulator:
return ret;
}
-static int
+static void
cxd2880_spi_remove(struct spi_device *spi)
{
struct cxd2880_dvb_spi *dvb_spi = spi_get_drvdata(spi);
@@ -643,8 +643,6 @@ cxd2880_spi_remove(struct spi_device *spi)
kfree(dvb_spi);
pr_info("cxd2880_spi remove ok.\n");
-
- return 0;
}
static const struct spi_device_id cxd2880_spi_id[] = {
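
All of the spi_driver conversions from here on follow the same shape: the ->remove() callback now returns void, so the trailing "return 0;" goes away and remove paths no longer pretend to report errors. A skeleton of the new form for a hypothetical driver (foo_* names are placeholders):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct spi_device *spi)
{
	/* allocate state, spi_set_drvdata(spi, state), ... */
	return 0;
}

static void foo_spi_remove(struct spi_device *spi)	/* was: static int */
{
	/* tear down state; nothing to return any more */
}

static struct spi_driver foo_spi_driver = {
	.driver	= { .name = "foo" },
	.probe	= foo_spi_probe,
	.remove	= foo_spi_remove,
};
module_spi_driver(foo_spi_driver);
MODULE_LICENSE("GPL");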
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index f86ef1ca1288..75c21a93e6d0 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -458,13 +458,11 @@ static int gs_probe(struct spi_device *spi)
return ret;
}
-static int gs_remove(struct spi_device *spi)
+static void gs_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static struct spi_driver gs_driver = {
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index 44247049a319..ad6c72c1ed04 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -472,7 +472,7 @@ err:
return ret;
}
-static int msi001_remove(struct spi_device *spi)
+static void msi001_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
struct msi001_dev *dev = sd_to_msi001_dev(sd);
@@ -486,7 +486,6 @@ static int msi001_remove(struct spi_device *spi)
v4l2_device_unregister_subdev(&dev->sd);
v4l2_ctrl_handler_free(&dev->hdl);
kfree(dev);
- return 0;
}
static const struct spi_device_id msi001_id_table[] = {
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 0cda6c6baefc..3993bdd4b519 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1943,22 +1943,6 @@ static void msb_io_work(struct work_struct *work)
static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
-static int msb_bd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct msb_data *msb = disk->private_data;
-
- dbg_verbose("block device open");
-
- mutex_lock(&msb_disk_lock);
-
- if (msb && msb->card)
- msb->usage_count++;
-
- mutex_unlock(&msb_disk_lock);
- return 0;
-}
-
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
@@ -1968,33 +1952,6 @@ static void msb_data_clear(struct msb_data *msb)
msb->card = NULL;
}
-static int msb_disk_release(struct gendisk *disk)
-{
- struct msb_data *msb = disk->private_data;
-
- dbg_verbose("block device release");
- mutex_lock(&msb_disk_lock);
-
- if (msb) {
- if (msb->usage_count)
- msb->usage_count--;
-
- if (!msb->usage_count) {
- disk->private_data = NULL;
- idr_remove(&msb_disk_idr, msb->disk_id);
- put_disk(disk);
- kfree(msb);
- }
- }
- mutex_unlock(&msb_disk_lock);
- return 0;
-}
-
-static void msb_bd_release(struct gendisk *disk, fmode_t mode)
-{
- msb_disk_release(disk);
-}
-
static int msb_bd_getgeo(struct block_device *bdev,
struct hd_geometry *geo)
{
@@ -2003,6 +1960,17 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0;
}
+static void msb_bd_free_disk(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+
+ kfree(msb);
+}
+
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2096,10 +2064,9 @@ static void msb_start(struct memstick_dev *card)
}
static const struct block_device_operations msb_bdops = {
- .open = msb_bd_open,
- .release = msb_bd_release,
- .getgeo = msb_bd_getgeo,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .getgeo = msb_bd_getgeo,
+ .free_disk = msb_bd_free_disk,
};
static const struct blk_mq_ops msb_mq_ops = {
@@ -2147,7 +2114,6 @@ static int msb_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dbg("Set total disk size to %lu sectors", capacity);
- msb->usage_count = 1;
msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
INIT_WORK(&msb->io_work, msb_io_work);
sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
@@ -2229,7 +2195,7 @@ static void msb_remove(struct memstick_dev *card)
msb_data_clear(msb);
mutex_unlock(&msb_disk_lock);
- msb_disk_release(msb->disk);
+ put_disk(msb->disk);
memstick_set_drvdata(card, NULL);
}
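
The memstick changes replace the hand-rolled open/release usage counting with the block layer's ->free_disk() hook: the driver's private data is freed exactly once, when the last reference to the gendisk is dropped, so the remove path can simply put_disk(). A condensed sketch of that arrangement (hypothetical foo_* names, assuming private_data was set when the disk was created):

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_data {
	int id;				/* placeholder private state */
};

static void foo_free_disk(struct gendisk *disk)
{
	struct foo_data *foo = disk->private_data;

	/* called by the block layer when the gendisk is finally released */
	kfree(foo);
}

static const struct block_device_operations foo_bdops = {
	.owner		= THIS_MODULE,
	.free_disk	= foo_free_disk,
};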
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 122e1a8a8bd5..7058f9aefeb9 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -143,7 +143,6 @@ struct ms_boot_page {
} __packed;
struct msb_data {
- unsigned int usage_count;
struct memstick_dev *card;
struct gendisk *disk;
struct request_queue *queue;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c0450397b673..725ba74ded30 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -133,7 +133,6 @@ struct mspro_devinfo {
struct mspro_block_data {
struct memstick_dev *card;
- unsigned int usage_count;
unsigned int caps;
struct gendisk *disk;
struct request_queue *queue;
@@ -178,53 +177,16 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error);
/*** Block device ***/
-static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct mspro_block_data *msb = disk->private_data;
- int rc = -ENXIO;
-
- mutex_lock(&mspro_block_disk_lock);
-
- if (msb && msb->card) {
- msb->usage_count++;
- if ((mode & FMODE_WRITE) && msb->read_only)
- rc = -EROFS;
- else
- rc = 0;
- }
-
- mutex_unlock(&mspro_block_disk_lock);
-
- return rc;
-}
-
-
-static void mspro_block_disk_release(struct gendisk *disk)
+static void mspro_block_bd_free_disk(struct gendisk *disk)
{
struct mspro_block_data *msb = disk->private_data;
int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
mutex_lock(&mspro_block_disk_lock);
-
- if (msb) {
- if (msb->usage_count)
- msb->usage_count--;
-
- if (!msb->usage_count) {
- kfree(msb);
- disk->private_data = NULL;
- idr_remove(&mspro_block_disk_idr, disk_id);
- put_disk(disk);
- }
- }
-
+ idr_remove(&mspro_block_disk_idr, disk_id);
mutex_unlock(&mspro_block_disk_lock);
-}
-static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
-{
- mspro_block_disk_release(disk);
+ kfree(msb);
}
static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -240,10 +202,9 @@ static int mspro_block_bd_getgeo(struct block_device *bdev,
}
static const struct block_device_operations ms_block_bdops = {
- .open = mspro_block_bd_open,
- .release = mspro_block_bd_release,
- .getgeo = mspro_block_bd_getgeo,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .getgeo = mspro_block_bd_getgeo,
+ .free_disk = mspro_block_bd_free_disk,
};
/*** Information ***/
@@ -1226,7 +1187,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
msb->disk->fops = &ms_block_bdops;
- msb->usage_count = 1;
msb->disk->private_data = msb;
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
@@ -1239,6 +1199,9 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
+ if (msb->read_only)
+ set_disk_ro(msb->disk, true);
+
rc = device_add_disk(&card->dev, msb->disk, NULL);
if (rc)
goto out_cleanup_disk;
@@ -1341,7 +1304,7 @@ static void mspro_block_remove(struct memstick_dev *card)
mspro_block_data_clear(msb);
mutex_unlock(&mspro_block_disk_lock);
- mspro_block_disk_release(msb->disk);
+ put_disk(msb->disk);
memstick_set_drvdata(card, NULL);
}
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 9fe06dda3782..03620c8efe34 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -206,13 +206,11 @@ static int arizona_spi_probe(struct spi_device *spi)
return arizona_dev_init(arizona);
}
-static int arizona_spi_remove(struct spi_device *spi)
+static void arizona_spi_remove(struct spi_device *spi)
{
struct arizona *arizona = spi_get_drvdata(spi);
arizona_dev_exit(arizona);
-
- return 0;
}
static const struct spi_device_id arizona_spi_ids[] = {
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 5faf3766a5e2..b79a57b45c1e 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -55,12 +55,11 @@ static int da9052_spi_probe(struct spi_device *spi)
return da9052_device_init(da9052, id->driver_data);
}
-static int da9052_spi_remove(struct spi_device *spi)
+static void da9052_spi_remove(struct spi_device *spi)
{
struct da9052 *da9052 = spi_get_drvdata(spi);
da9052_device_exit(da9052);
- return 0;
}
static const struct spi_device_id da9052_spi_id[] = {
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 70fa18b04ad2..3d5ce18aa9ae 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -193,13 +193,11 @@ static void pcap_isr_work(struct work_struct *work)
ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
- local_irq_disable();
service = isr & ~msr;
for (irq = pcap->irq_base; service; service >>= 1, irq++) {
if (service & 1)
- generic_handle_irq(irq);
+ generic_handle_irq_safe(irq);
}
- local_irq_enable();
ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
} while (gpio_get_value(pdata->gpio));
}
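
generic_handle_irq_safe() may be called from any context because it handles the local-interrupt state itself, which is what lets the explicit local_irq_disable()/local_irq_enable() pair around the demux loop be dropped here. A small demultiplexer sketch using it (hypothetical names):

#include <linux/bitops.h>
#include <linux/irqdesc.h>

/* Fan a pending-bit word out to the corresponding Linux IRQs. */
static void foo_demux_pending(unsigned long pending, unsigned int irq_base)
{
	unsigned int bit;

	for_each_set_bit(bit, &pending, BITS_PER_LONG)
		generic_handle_irq_safe(irq_base + bit);
}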
@@ -392,7 +390,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
return ret;
}
-static int ezx_pcap_remove(struct spi_device *spi)
+static void ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = spi_get_drvdata(spi);
unsigned long flags;
@@ -412,8 +410,6 @@ static int ezx_pcap_remove(struct spi_device *spi)
irq_set_chip_and_handler(i, NULL, NULL);
destroy_workqueue(pcap->workqueue);
-
- return 0;
}
static int ezx_pcap_probe(struct spi_device *spi)
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index f10e53187f67..9ffab9aafd81 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -63,6 +63,8 @@
#define SPIBASE_BYT 0x54
#define SPIBASE_BYT_SZ 512
#define SPIBASE_BYT_EN BIT(1)
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
#define SPIBASE_LPT 0x3800
#define SPIBASE_LPT_SZ 512
@@ -1084,12 +1086,57 @@ wdt_done:
return ret;
}
+static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
+{
+ u32 val;
+
+ val = readl(base + BYT_BCR);
+ if (!(val & BYT_BCR_WPD)) {
+ val |= BYT_BCR_WPD;
+ writel(val, base + BYT_BCR);
+ val = readl(base + BYT_BCR);
+ }
+
+ return val & BYT_BCR_WPD;
+}
+
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ unsigned int spi = PCI_DEVFN(13, 2);
+ struct pci_bus *bus = data;
+ u32 bcr;
+
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_bus_write_config_dword(bus, spi, BCR, bcr);
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static int lpc_ich_init_spi(struct pci_dev *dev)
{
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
- u32 spi_base, rcba, bcr;
+ u32 spi_base, rcba;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1103,6 +1150,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
if (spi_base & SPIBASE_BYT_EN) {
res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
res->end = res->start + SPIBASE_BYT_SZ - 1;
+
+ info->set_writeable = lpc_ich_byt_set_writeable;
}
break;
@@ -1113,8 +1162,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->start = spi_base + SPIBASE_LPT;
res->end = res->start + SPIBASE_LPT_SZ - 1;
- pci_read_config_dword(dev, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_lpt_set_writeable;
+ info->data = dev;
}
break;
@@ -1135,8 +1184,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->start = spi_base & 0xfffffff0;
res->end = res->start + SPIBASE_APL_SZ - 1;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = bus;
}
pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
index e860f5ff0933..da84eb50e53a 100644
--- a/drivers/mfd/madera-spi.c
+++ b/drivers/mfd/madera-spi.c
@@ -112,13 +112,11 @@ static int madera_spi_probe(struct spi_device *spi)
return madera_dev_init(madera);
}
-static int madera_spi_remove(struct spi_device *spi)
+static void madera_spi_remove(struct spi_device *spi)
{
struct madera *madera = spi_get_drvdata(spi);
madera_dev_exit(madera);
-
- return 0;
}
static const struct spi_device_id madera_spi_ids[] = {
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 4d8913d647e6..f803527e5819 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -166,10 +166,9 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
return mc13xxx_common_init(&spi->dev);
}
-static int mc13xxx_spi_remove(struct spi_device *spi)
+static void mc13xxx_spi_remove(struct spi_device *spi)
{
mc13xxx_common_exit(&spi->dev);
- return 0;
}
static struct spi_driver mc13xxx_spi_driver = {
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
index fec2b4ec477c..d2f3d8f1e05a 100644
--- a/drivers/mfd/rsmu_spi.c
+++ b/drivers/mfd/rsmu_spi.c
@@ -220,13 +220,11 @@ static int rsmu_spi_probe(struct spi_device *client)
return rsmu_core_init(rsmu);
}
-static int rsmu_spi_remove(struct spi_device *client)
+static void rsmu_spi_remove(struct spi_device *client)
{
struct rsmu_ddata *rsmu = spi_get_drvdata(client);
rsmu_core_exit(rsmu);
-
- return 0;
}
static const struct spi_device_id rsmu_spi_id[] = {
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 6c5915016be5..ad8055a0e286 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -102,13 +102,11 @@ stmpe_spi_probe(struct spi_device *spi)
return stmpe_probe(&spi_ci, id->driver_data);
}
-static int stmpe_spi_remove(struct spi_device *spi)
+static void stmpe_spi_remove(struct spi_device *spi)
{
struct stmpe *stmpe = spi_get_drvdata(spi);
stmpe_remove(stmpe);
-
- return 0;
}
static const struct of_device_id stmpe_spi_of_match[] = {
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index d701926aa46e..bba38fbc781d 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -50,13 +50,11 @@ static int tps65912_spi_probe(struct spi_device *spi)
return tps65912_device_init(tps);
}
-static int tps65912_spi_remove(struct spi_device *spi)
+static void tps65912_spi_remove(struct spi_device *spi)
{
struct tps65912 *tps = spi_get_drvdata(spi);
tps65912_device_exit(tps);
-
- return 0;
}
static const struct spi_device_id tps65912_spi_id_table[] = {
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index a9e75d80ad36..263055bda48b 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -90,10 +90,9 @@ static int ad_dpot_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->name);
}
-static int ad_dpot_spi_remove(struct spi_device *spi)
+static void ad_dpot_spi_remove(struct spi_device *spi)
{
ad_dpot_remove(&spi->dev);
- return 0;
}
static const struct spi_device_id ad_dpot_spi_id[] = {
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index c3305bdda69c..bee727ed98db 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -440,6 +440,10 @@ static int at25_probe(struct spi_device *spi)
return -ENXIO;
}
+ at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL);
+ if (!at25)
+ return -ENOMEM;
+
mutex_init(&at25->lock);
at25->spi = spi;
spi_set_drvdata(spi, at25);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index bb9c4512c968..9fbfe784d710 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 1f15399e5cb4..b630625b3024 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -555,14 +555,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
return 0;
}
-static int eeprom_93xx46_remove(struct spi_device *spi)
+static void eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
-
- return 0;
}
static struct spi_driver eeprom_93xx46_driver = {
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4ccbf43e6bfa..aa1682b94a23 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
}
if (copy_to_user(argp, &bp, sizeof(bp))) {
- dma_buf_put(buf->dmabuf);
+ /*
+ * The usercopy failed, but we can't do much about it, as
+ * dma_buf_fd() already called fd_install() and made the
+ * file descriptor accessible for the current process. It
+ * might already be closed and dmabuf no longer valid when
+ * we reach this point. Therefore "leak" the fd and rely on
+ * the process exit path to do any required cleanup.
+ */
return -EFAULT;
}
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index 1c763796cf1f..caa3de37698b 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -117,8 +117,8 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv)
* If both powerkey down and up IRQs are received,
* handle them at the right order
*/
- generic_handle_irq(priv->irqs[POWERKEY_DOWN]);
- generic_handle_irq(priv->irqs[POWERKEY_UP]);
+ generic_handle_irq_safe(priv->irqs[POWERKEY_DOWN]);
+ generic_handle_irq_safe(priv->irqs[POWERKEY_UP]);
pending &= ~HISI_IRQ_POWERKEY_UP_DOWN;
}
@@ -126,7 +126,7 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv)
continue;
for_each_set_bit(offset, &pending, BITS_PER_BYTE) {
- generic_handle_irq(priv->irqs[offset + i * BITS_PER_BYTE]);
+ generic_handle_irq_safe(priv->irqs[offset + i * BITS_PER_BYTE]);
}
}
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 98828030b5a4..bac4df2e5231 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -211,13 +211,11 @@ static int lattice_ecp3_probe(struct spi_device *spi)
return 0;
}
-static int lattice_ecp3_remove(struct spi_device *spi)
+static void lattice_ecp3_remove(struct spi_device *spi)
{
struct fpga_data *data = spi_get_drvdata(spi);
wait_for_completion(&data->fw_loaded);
-
- return 0;
}
static const struct spi_device_id lattice_ecp3_id[] = {
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 9e40dfb60742..203a108b8883 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -96,15 +96,13 @@ static int lis302dl_spi_probe(struct spi_device *spi)
return lis3lv02d_init_device(&lis3_dev);
}
-static int lis302dl_spi_remove(struct spi_device *spi)
+static void lis302dl_spi_remove(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
lis3lv02d_remove_fs(&lis3_dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4e61b28a002f..4e67c1403cc9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
@@ -1908,7 +1908,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
cb_data.card = card;
cb_data.status = 0;
- err = __mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
&mmc_blk_busy_cb, &cb_data);
/*
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 096ae624be9a..58a60afa650b 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -15,6 +15,7 @@
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -34,13 +35,13 @@ static ssize_t type_show(struct device *dev,
switch (card->type) {
case MMC_TYPE_MMC:
- return sprintf(buf, "MMC\n");
+ return sysfs_emit(buf, "MMC\n");
case MMC_TYPE_SD:
- return sprintf(buf, "SD\n");
+ return sysfs_emit(buf, "SD\n");
case MMC_TYPE_SDIO:
- return sprintf(buf, "SDIO\n");
+ return sysfs_emit(buf, "SDIO\n");
case MMC_TYPE_SD_COMBO:
- return sprintf(buf, "SDcombo\n");
+ return sysfs_emit(buf, "SDcombo\n");
default:
return -EFAULT;
}
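
sysfs_emit() is the preferred replacement for sprintf() in sysfs show() callbacks: it verifies that the buffer is the page-aligned one sysfs passes in and never writes past PAGE_SIZE, returning the number of bytes emitted. Minimal show() sketch (hypothetical attribute):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* bounds-checked formatting into the sysfs page buffer */
	return sysfs_emit(buf, "%s\n", "example");
}
static DEVICE_ATTR_RO(foo);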
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 8105852c4b62..3996b191b68d 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -9,6 +9,7 @@
#define _MMC_CORE_BUS_H
#include <linux/device.h>
+#include <linux/sysfs.h>
struct mmc_host;
struct mmc_card;
@@ -17,7 +18,7 @@ struct mmc_card;
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
- return sprintf(buf, fmt, args); \
+ return sysfs_emit(buf, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index cf140f4ec864..2ed2b4d5e5a5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -588,6 +588,25 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
EXPORT_SYMBOL(mmc_alloc_host);
+static int mmc_validate_host_caps(struct mmc_host *host)
+{
+ struct device *dev = host->parent;
+ u32 caps = host->caps, caps2 = host->caps2;
+
+ if (caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
+ dev_warn(dev, "missing ->enable_sdio_irq() ops\n");
+ return -EINVAL;
+ }
+
+ if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
+ !(caps & MMC_CAP_8_BIT_DATA)) {
+ dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
+ host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
+ }
+
+ return 0;
+}
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -600,8 +619,9 @@ int mmc_add_host(struct mmc_host *host)
{
int err;
- WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
- !host->ops->enable_sdio_irq);
+ err = mmc_validate_host_caps(host);
+ if (err)
+ return err;
err = device_add(&host->class_dev);
if (err)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bbbbcaf70a59..e7ea45386c22 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -812,12 +813,11 @@ static ssize_t mmc_fwrev_show(struct device *dev,
{
struct mmc_card *card = mmc_dev_to_card(dev);
- if (card->ext_csd.rev < 7) {
- return sprintf(buf, "0x%x\n", card->cid.fwrev);
- } else {
- return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
- card->ext_csd.fwrev);
- }
+ if (card->ext_csd.rev < 7)
+ return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
+ else
+ return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
+ card->ext_csd.fwrev);
}
static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
@@ -830,10 +830,10 @@ static ssize_t mmc_dsr_show(struct device *dev,
struct mmc_host *host = card->host;
if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
else
/* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
@@ -1355,11 +1355,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
int err = -EINVAL;
u8 val;
- if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
- err = -ENOTSUPP;
- goto out_err;
- }
-
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1523,13 +1518,23 @@ static int mmc_select_timing(struct mmc_card *card)
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
err = mmc_select_hs400es(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
err = mmc_select_hs200(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ if (err == -EBADMSG)
+ card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200;
+ else
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
+out:
if (err && err != -EBADMSG)
return err;
@@ -1962,7 +1967,7 @@ static int mmc_sleep(struct mmc_host *host)
goto out_release;
}
- err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host);
+ err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index d63d1c735335..180d7e9d3400 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -21,6 +21,8 @@
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US (1 * 1000) /* 1ms */
+#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -232,7 +234,9 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
+ err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+ MMC_OP_COND_TIMEOUT_MS,
+ &__mmc_send_op_cond_cb, &cb_data);
if (err)
return err;
@@ -495,13 +499,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
return 0;
}
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data)
{
int err;
unsigned long timeout;
- unsigned int udelay = 32, udelay_max = 32768;
+ unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
@@ -546,7 +551,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
cb_data.retry_crc_err = retry_crc_err;
cb_data.busy_cmd = busy_cmd;
- return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
+ return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
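
__mmc_poll_for_busy() gains a period_us argument that seeds the exponentially backed-off polling delay; passing 0 keeps the previous 32us starting point, which is what every caller other than mmc_send_op_cond() does after this change. A caller-side sketch with a hypothetical busy callback (note __mmc_poll_for_busy() is an mmc core-internal helper declared in mmc_ops.h):

#include <linux/mmc/host.h>
#include "mmc_ops.h"		/* core-internal declaration */

static int foo_busy_cb(void *cb_data, bool *busy)
{
	*busy = false;		/* query the hardware here */
	return 0;
}

static int foo_wait_ready(struct mmc_host *host)
{
	/* period_us = 0 (default initial delay), 1000 ms total timeout */
	return __mmc_poll_for_busy(host, 0, 1000, foo_busy_cb, NULL);
}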
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9c813b851d0b..09ffbc00908b 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -41,7 +41,8 @@ int mmc_can_ext_csd(struct mmc_card *card);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 45f578793980..68df6b2f49cc 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -13,6 +13,7 @@
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -67,7 +68,7 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
@@ -708,18 +709,16 @@ MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
-static ssize_t mmc_dsr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
- struct mmc_host *host = card->host;
-
- if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
- else
- /* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
+ /* return default DSR value */
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
@@ -735,9 +734,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
@@ -1664,9 +1663,15 @@ static int sd_poweroff_notify(struct mmc_card *card)
goto out;
}
+ /* Find out when the command is completed. */
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
cb_data.card = card;
cb_data.reg_buf = reg_buf;
- err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
&sd_busy_poweroff_notify_cb, &cb_data);
out:
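
Editor's sketch (not part of the patch): the sprintf()-to-sysfs_emit() conversions above follow the general rule that sysfs show callbacks should use the PAGE_SIZE-aware helper rather than raw sprintf(). A minimal kernel-style fragment of the pattern, with an invented attribute name and a fixed demo value:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct device *dev, struct device_attribute *attr,
				    char *buf)
	{
		/* sysfs_emit() bounds the write to the one-page buffer sysfs hands us */
		return sysfs_emit(buf, "0x%x\n", 0x404);
	}
	/* In a full driver this attribute would be wired into a device attribute group. */
	static DEVICE_ATTR_RO(example);
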
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 41164748723d..25799accf8a0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -40,9 +41,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index fda03b35c14a..c6268c38c69e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,6 +14,7 @@
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/acpi.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -35,7 +36,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
struct sdio_func *func; \
\
func = dev_to_sdio_func (dev); \
- return sprintf(buf, format_string, args); \
+ return sysfs_emit(buf, format_string, args); \
} \
static DEVICE_ATTR_RO(field)
@@ -52,9 +53,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > func->num_info) \
return -ENODATA; \
- if (!func->info[num-1][0]) \
+ if (!func->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", func->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", func->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 52b0b27a6839..af6c3c329076 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1094,3 +1094,16 @@ config MMC_OWL
config MMC_SDHCI_EXTERNAL_DMA
bool
+
+config MMC_LITEX
+ tristate "LiteX MMC Host Controller support"
+ depends on ((PPC_MICROWATT || LITEX) && OF && HAVE_CLK) || COMPILE_TEST
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ This selects support for the MMC Host Controller found in LiteX SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called litex_mmc.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ea36d379bd3c..4e4ceb32c4b4 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_MMC_CQHCI) += cqhci.o
cqhci-y += cqhci-core.o
cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
+obj-$(CONFIG_MMC_LITEX) += litex_mmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 2a757c88f9d2..7138dfa065bf 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1189,7 +1189,6 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
static int davinci_mmcsd_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
@@ -1235,9 +1234,8 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_input_clk = clk_get_rate(host->clk);
- match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
- if (match) {
- pdev->id_entry = match->data;
+ pdev->id_entry = of_device_get_match_data(&pdev->dev);
+ if (pdev->id_entry) {
ret = mmc_of_parse(mmc);
if (ret) {
dev_err_probe(&pdev->dev, ret,
@@ -1375,8 +1373,12 @@ static int davinci_mmcsd_suspend(struct device *dev)
static int davinci_mmcsd_resume(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ return ret;
- clk_enable(host->clk);
mmc_davinci_reset_ctrl(host, 0);
return 0;
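
Editor's sketch (not part of the patch): the same of_match_device() → of_device_get_match_data() conversion recurs in the sdhci-of-at91, sdhci-tegra and wmt-sdmmc hunks further down. A hedged sketch of the resulting shape, with a made-up config struct, compatible string and driver name:

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_cfg {
		unsigned int max_clk_hz;
	};

	static const struct example_cfg example_cfg_a = { .max_clk_hz = 25000000 };

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-a", .data = &example_cfg_a },
		{ /* sentinel */ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_cfg *cfg;

		/* Replaces the of_match_device() + match->data two-step. */
		cfg = of_device_get_match_data(&pdev->dev);
		if (!cfg)
			return -EINVAL;

		dev_info(&pdev->dev, "max clk %u Hz\n", cfg->max_clk_hz);
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe = example_probe,
		.driver = {
			.name = "example",
			.of_match_table = example_of_match,
		},
	};
	module_platform_driver(example_driver);

	MODULE_LICENSE("GPL");
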
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 95d0ec0f5f3a..f825487aa739 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -15,7 +15,9 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-#define RK3288_CLKGEN_DIV 2
+#define RK3288_CLKGEN_DIV 2
+
+static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
struct dw_mci_rockchip_priv_data {
struct clk *drv_clk;
@@ -51,7 +53,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
ret = clk_set_rate(host->ciu_clk, cclkin);
if (ret)
- dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+ dev_warn(host->dev, "failed to set rate %uHz err: %d\n", cclkin, ret);
bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
if (bus_hz != host->bus_hz) {
@@ -290,13 +292,30 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
static int dw_mci_rockchip_init(struct dw_mci *host)
{
+ int ret, i;
+
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
- if (of_device_is_compatible(host->dev->of_node,
- "rockchip,rk3288-dw-mshc"))
+ if (of_device_is_compatible(host->dev->of_node, "rockchip,rk3288-dw-mshc")) {
host->bus_hz /= RK3288_CLKGEN_DIV;
+ /* clock driver will fail if the clock is less than the lowest source clock
+ * divided by the internal clock divider. Test for the lowest available
+ * clock and set the minimum freq to clock / clock divider.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ ret = clk_round_rate(host->ciu_clk, freqs[i] * RK3288_CLKGEN_DIV);
+ if (ret > 0) {
+ host->minimum_speed = ret / RK3288_CLKGEN_DIV;
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_warn(host->dev, "no valid minimum freq: %d\n", ret);
+ }
+
return 0;
}
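
Editor's sketch (not part of the patch): the dw_mmc-rockchip change above probes a short table of candidate frequencies with clk_round_rate() and uses the first one the clock driver accepts as the controller's minimum bus rate. A kernel-style sketch of just that search, with an invented divider and candidate list:

	#include <linux/clk.h>
	#include <linux/kernel.h>

	#define EXAMPLE_CLKGEN_DIV	2

	static unsigned int example_min_bus_hz(struct clk *ciu_clk)
	{
		static const unsigned int candidates[] = { 100000, 200000, 300000, 400000 };
		long rate;
		int i;

		for (i = 0; i < ARRAY_SIZE(candidates); i++) {
			rate = clk_round_rate(ciu_clk, candidates[i] * EXAMPLE_CLKGEN_DIV);
			if (rate > 0)
				return rate / EXAMPLE_CLKGEN_DIV;	/* lowest workable rate */
		}
		return 0;	/* no candidate could be rounded to a valid rate */
	}
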
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 42bf8a2287ba..06dc56cbada8 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2898,7 +2898,12 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- mmc->f_min = DW_MCI_FREQ_MIN;
+ /* if host has set a minimum_freq, we should respect it */
+ if (host->minimum_speed)
+ mmc->f_min = host->minimum_speed;
+ else
+ mmc->f_min = DW_MCI_FREQ_MIN;
+
if (!mmc->f_max)
mmc->f_max = DW_MCI_FREQ_MAX;
@@ -3057,8 +3062,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
dev_info(host->dev, "Using internal DMA controller.\n");
} else {
/* TRANS_MODE_EDMAC: check dma bindings again */
- if ((device_property_read_string_array(dev, "dma-names",
- NULL, 0) < 0) ||
+ if ((device_property_string_array_count(dev, "dma-names") < 0) ||
!device_property_present(dev, "dmas")) {
goto no_dma;
}
@@ -3568,7 +3572,7 @@ int dw_mci_runtime_resume(struct device *dev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
- if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+ if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
/* Force setup bus to guarantee available clock output */
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 7f1e38621d13..4ed81f94f7ca 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -99,6 +99,7 @@ struct dw_mci_dma_slave {
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
+ * @minimum_speed: Stored minimum rate of the controller.
* @fifoth_val: The value of FIFOTH register.
* @verid: Denote Version ID.
* @dev: Device associated with the MMC controller.
@@ -201,6 +202,7 @@ struct dw_mci {
u32 bus_hz;
u32 current_speed;
+ u32 minimum_speed;
u32 fifoth_val;
u16 verid;
struct device *dev;
diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
new file mode 100644
index 000000000000..6ba0d63b8c07
--- /dev/null
+++ b/drivers/mmc/host/litex_mmc.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX LiteSDCard driver
+ *
+ * Copyright (C) 2019-2020 Antmicro <contact@antmicro.com>
+ * Copyright (C) 2019-2020 Kamil Rakoczy <krakoczy@antmicro.com>
+ * Copyright (C) 2019-2020 Maciej Dudek <mdudek@internships.antmicro.com>
+ * Copyright (C) 2020 Paul Mackerras <paulus@ozlabs.org>
+ * Copyright (C) 2020-2022 Gabriel Somlo <gsomlo@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/litex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#define LITEX_PHY_CARDDETECT 0x00
+#define LITEX_PHY_CLOCKERDIV 0x04
+#define LITEX_PHY_INITIALIZE 0x08
+#define LITEX_PHY_WRITESTATUS 0x0C
+#define LITEX_CORE_CMDARG 0x00
+#define LITEX_CORE_CMDCMD 0x04
+#define LITEX_CORE_CMDSND 0x08
+#define LITEX_CORE_CMDRSP 0x0C
+#define LITEX_CORE_CMDEVT 0x1C
+#define LITEX_CORE_DATEVT 0x20
+#define LITEX_CORE_BLKLEN 0x24
+#define LITEX_CORE_BLKCNT 0x28
+#define LITEX_BLK2MEM_BASE 0x00
+#define LITEX_BLK2MEM_LEN 0x08
+#define LITEX_BLK2MEM_ENA 0x0C
+#define LITEX_BLK2MEM_DONE 0x10
+#define LITEX_BLK2MEM_LOOP 0x14
+#define LITEX_MEM2BLK_BASE 0x00
+#define LITEX_MEM2BLK_LEN 0x08
+#define LITEX_MEM2BLK_ENA 0x0C
+#define LITEX_MEM2BLK_DONE 0x10
+#define LITEX_MEM2BLK_LOOP 0x14
+#define LITEX_MEM2BLK 0x18
+#define LITEX_IRQ_STATUS 0x00
+#define LITEX_IRQ_PENDING 0x04
+#define LITEX_IRQ_ENABLE 0x08
+
+#define SD_CTL_DATA_XFER_NONE 0
+#define SD_CTL_DATA_XFER_READ 1
+#define SD_CTL_DATA_XFER_WRITE 2
+
+#define SD_CTL_RESP_NONE 0
+#define SD_CTL_RESP_SHORT 1
+#define SD_CTL_RESP_LONG 2
+#define SD_CTL_RESP_SHORT_BUSY 3
+
+#define SD_BIT_DONE BIT(0)
+#define SD_BIT_WR_ERR BIT(1)
+#define SD_BIT_TIMEOUT BIT(2)
+#define SD_BIT_CRC_ERR BIT(3)
+
+#define SD_SLEEP_US 5
+#define SD_TIMEOUT_US 20000
+
+#define SDIRQ_CARD_DETECT 1
+#define SDIRQ_SD_TO_MEM_DONE 2
+#define SDIRQ_MEM_TO_SD_DONE 4
+#define SDIRQ_CMD_DONE 8
+
+struct litex_mmc_host {
+ struct mmc_host *mmc;
+
+ void __iomem *sdphy;
+ void __iomem *sdcore;
+ void __iomem *sdreader;
+ void __iomem *sdwriter;
+ void __iomem *sdirq;
+
+ void *buffer;
+ size_t buf_size;
+ dma_addr_t dma;
+
+ struct completion cmd_done;
+ int irq;
+
+ unsigned int ref_clk;
+ unsigned int sd_clk;
+
+ u32 resp[4];
+ u16 rca;
+
+ bool is_bus_width_set;
+ bool app_cmd;
+};
+
+static int litex_mmc_sdcard_wait_done(void __iomem *reg, struct device *dev)
+{
+ u8 evt;
+ int ret;
+
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ return ret;
+ if (evt == SD_BIT_DONE)
+ return 0;
+ if (evt & SD_BIT_WR_ERR)
+ return -EIO;
+ if (evt & SD_BIT_TIMEOUT)
+ return -ETIMEDOUT;
+ if (evt & SD_BIT_CRC_ERR)
+ return -EILSEQ;
+ dev_err(dev, "%s: unknown error (evt=%x)\n", __func__, evt);
+ return -EINVAL;
+}
+
+static int litex_mmc_send_cmd(struct litex_mmc_host *host,
+ u8 cmd, u32 arg, u8 response_len, u8 transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ void __iomem *reg;
+ int ret;
+ u8 evt;
+
+ litex_write32(host->sdcore + LITEX_CORE_CMDARG, arg);
+ litex_write32(host->sdcore + LITEX_CORE_CMDCMD,
+ cmd << 8 | transfer << 5 | response_len);
+ litex_write8(host->sdcore + LITEX_CORE_CMDSND, 1);
+
+ /*
+ * Wait for an interrupt if we have an interrupt and either there is
+ * data to be transferred, or if the card can report busy via DAT0.
+ */
+ if (host->irq > 0 &&
+ (transfer != SD_CTL_DATA_XFER_NONE ||
+ response_len == SD_CTL_RESP_SHORT_BUSY)) {
+ reinit_completion(&host->cmd_done);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CMD_DONE | SDIRQ_CARD_DETECT);
+ wait_for_completion(&host->cmd_done);
+ }
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_CMDEVT, dev);
+ if (ret) {
+ dev_err(dev, "Command (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ if (response_len != SD_CTL_RESP_NONE) {
+ /*
+ * NOTE: this matches the semantics of litex_read32()
+ * regardless of underlying arch endianness!
+ */
+ memcpy_fromio(host->resp,
+ host->sdcore + LITEX_CORE_CMDRSP, 0x10);
+ }
+
+ if (!host->app_cmd && cmd == SD_SEND_RELATIVE_ADDR)
+ host->rca = (host->resp[3] >> 16);
+
+ host->app_cmd = (cmd == MMC_APP_CMD);
+
+ if (transfer == SD_CTL_DATA_XFER_NONE)
+ return ret; /* OK from prior litex_mmc_sdcard_wait_done() */
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_DATEVT, dev);
+ if (ret) {
+ dev_err(dev, "Data xfer (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ /* Wait for completion of (read or write) DMA transfer */
+ reg = (transfer == SD_CTL_DATA_XFER_READ) ?
+ host->sdreader + LITEX_BLK2MEM_DONE :
+ host->sdwriter + LITEX_MEM2BLK_DONE;
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ dev_err(dev, "DMA timeout (cmd %d)\n", cmd);
+
+ return ret;
+}
+
+static int litex_mmc_send_app_cmd(struct litex_mmc_host *host)
+{
+ return litex_mmc_send_cmd(host, MMC_APP_CMD, host->rca << 16,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_send_set_bus_w_cmd(struct litex_mmc_host *host, u32 width)
+{
+ return litex_mmc_send_cmd(host, SD_APP_SET_BUS_WIDTH, width,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_set_bus_width(struct litex_mmc_host *host)
+{
+ bool app_cmd_sent;
+ int ret;
+
+ if (host->is_bus_width_set)
+ return 0;
+
+ /* Ensure 'app_cmd' precedes 'app_set_bus_width_cmd' */
+ app_cmd_sent = host->app_cmd; /* was preceding command app_cmd? */
+ if (!app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ /* LiteSDCard only supports 4-bit bus width */
+ ret = litex_mmc_send_set_bus_w_cmd(host, MMC_BUS_WIDTH_4);
+ if (ret)
+ return ret;
+
+ /* Re-send 'app_cmd' if necessary */
+ if (app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ host->is_bus_width_set = true;
+
+ return 0;
+}
+
+static int litex_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (!mmc_card_is_removable(mmc))
+ return 1;
+
+ ret = !litex_read8(host->sdphy + LITEX_PHY_CARDDETECT);
+ if (ret)
+ return ret;
+
+ /* Ensure bus width will be set (again) upon card (re)insertion */
+ host->is_bus_width_set = false;
+
+ return 0;
+}
+
+static irqreturn_t litex_mmc_interrupt(int irq, void *arg)
+{
+ struct mmc_host *mmc = arg;
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ u32 pending = litex_read32(host->sdirq + LITEX_IRQ_PENDING);
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Check for card change interrupt */
+ if (pending & SDIRQ_CARD_DETECT) {
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING,
+ SDIRQ_CARD_DETECT);
+ mmc_detect_change(mmc, msecs_to_jiffies(10));
+ ret = IRQ_HANDLED;
+ }
+
+ /* Check for command completed */
+ if (pending & SDIRQ_CMD_DONE) {
+ /* Disable it so it doesn't keep interrupting */
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CARD_DETECT);
+ complete(&host->cmd_done);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static u32 litex_mmc_response_len(struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136)
+ return SD_CTL_RESP_LONG;
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ return SD_CTL_RESP_NONE;
+ if (cmd->flags & MMC_RSP_BUSY)
+ return SD_CTL_RESP_SHORT_BUSY;
+ return SD_CTL_RESP_SHORT;
+}
+
+static void litex_mmc_do_dma(struct litex_mmc_host *host, struct mmc_data *data,
+ unsigned int *len, bool *direct, u8 *transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ dma_addr_t dma;
+ int sg_count;
+
+ /*
+ * Try to DMA directly to/from the data buffer.
+ * We can do that if the buffer can be mapped for DMA
+ * in one contiguous chunk.
+ */
+ dma = host->dma;
+ *len = data->blksz * data->blocks;
+ sg_count = dma_map_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (sg_count == 1) {
+ dma = sg_dma_address(data->sg);
+ *len = sg_dma_len(data->sg);
+ *direct = true;
+ } else if (*len > host->buf_size)
+ *len = host->buf_size;
+
+ if (data->flags & MMC_DATA_READ) {
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write64(host->sdreader + LITEX_BLK2MEM_BASE, dma);
+ litex_write32(host->sdreader + LITEX_BLK2MEM_LEN, *len);
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_READ;
+ } else if (data->flags & MMC_DATA_WRITE) {
+ if (!*direct)
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->buffer, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+ litex_write64(host->sdwriter + LITEX_MEM2BLK_BASE, dma);
+ litex_write32(host->sdwriter + LITEX_MEM2BLK_LEN, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_WRITE;
+ } else {
+ dev_warn(dev, "Data present w/o read or write flag.\n");
+ /* Continue: set cmd status, mark req done */
+ }
+
+ litex_write16(host->sdcore + LITEX_CORE_BLKLEN, data->blksz);
+ litex_write32(host->sdcore + LITEX_CORE_BLKCNT, data->blocks);
+}
+
+static void litex_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ struct device *dev = mmc_dev(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_command *sbc = mrq->sbc;
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *stop = mrq->stop;
+ unsigned int retries = cmd->retries;
+ unsigned int len = 0;
+ bool direct = false;
+ u32 response_len = litex_mmc_response_len(cmd);
+ u8 transfer = SD_CTL_DATA_XFER_NONE;
+
+ /* First check that the card is still there */
+ if (!litex_mmc_get_cd(mmc)) {
+ cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* Send set-block-count command if needed */
+ if (sbc) {
+ sbc->error = litex_mmc_send_cmd(host, sbc->opcode, sbc->arg,
+ litex_mmc_response_len(sbc),
+ SD_CTL_DATA_XFER_NONE);
+ if (sbc->error) {
+ host->is_bus_width_set = false;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ if (data) {
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST
+ * inject a SET_BUS_WIDTH (acmd6) before the very first data
+ * transfer, earlier than when the mmc subsystem would normally
+ * get around to it!
+ */
+ cmd->error = litex_mmc_set_bus_width(host);
+ if (cmd->error) {
+ dev_err(dev, "Can't set bus width!\n");
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ litex_mmc_do_dma(host, data, &len, &direct, &transfer);
+ }
+
+ do {
+ cmd->error = litex_mmc_send_cmd(host, cmd->opcode, cmd->arg,
+ response_len, transfer);
+ } while (cmd->error && retries-- > 0);
+
+ if (cmd->error) {
+ /* Card may be gone; don't assume bus width is still set */
+ host->is_bus_width_set = false;
+ }
+
+ if (response_len == SD_CTL_RESP_SHORT) {
+ /* Pull short response fields from appropriate host registers */
+ cmd->resp[0] = host->resp[3];
+ cmd->resp[1] = host->resp[2] & 0xFF;
+ } else if (response_len == SD_CTL_RESP_LONG) {
+ cmd->resp[0] = host->resp[0];
+ cmd->resp[1] = host->resp[1];
+ cmd->resp[2] = host->resp[2];
+ cmd->resp[3] = host->resp[3];
+ }
+
+ /* Send stop-transmission command if required */
+ if (stop && (cmd->error || !sbc)) {
+ stop->error = litex_mmc_send_cmd(host, stop->opcode, stop->arg,
+ litex_mmc_response_len(stop),
+ SD_CTL_DATA_XFER_NONE);
+ if (stop->error)
+ host->is_bus_width_set = false;
+ }
+
+ if (data) {
+ dma_unmap_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+
+ if (!cmd->error && transfer != SD_CTL_DATA_XFER_NONE) {
+ data->bytes_xfered = min(len, mmc->max_req_size);
+ if (transfer == SD_CTL_DATA_XFER_READ && !direct) {
+ sg_copy_from_buffer(data->sg, sg_nents(data->sg),
+ host->buffer, data->bytes_xfered);
+ }
+ }
+
+ mmc_request_done(mmc, mrq);
+}
+
+static void litex_mmc_setclk(struct litex_mmc_host *host, unsigned int freq)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ u32 div;
+
+ div = freq ? host->ref_clk / freq : 256U;
+ div = roundup_pow_of_two(div);
+ div = clamp(div, 2U, 256U);
+ dev_dbg(dev, "sd_clk_freq=%d: set to %d via div=%d\n",
+ freq, host->ref_clk / div, div);
+ litex_write16(host->sdphy + LITEX_PHY_CLOCKERDIV, div);
+ host->sd_clk = freq;
+}
+
+static void litex_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+
+ /*
+ * NOTE: Ignore any ios->bus_width updates; they occur right after
+ * the mmc core sends its own acmd6 bus-width change notification,
+ * which is redundant since we snoop on the command flow and inject
+ * an early acmd6 before the first data transfer command is sent!
+ */
+
+ /* Update sd_clk */
+ if (ios->clock != host->sd_clk)
+ litex_mmc_setclk(host, ios->clock);
+}
+
+static const struct mmc_host_ops litex_mmc_ops = {
+ .get_cd = litex_mmc_get_cd,
+ .request = litex_mmc_request,
+ .set_ios = litex_mmc_set_ios,
+};
+
+static int litex_mmc_irq_init(struct platform_device *pdev,
+ struct litex_mmc_host *host)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int ret;
+
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ host->irq = ret;
+ else {
+ dev_warn(dev, "Failed to get IRQ, using polling\n");
+ goto use_polling;
+ }
+
+ host->sdirq = devm_platform_ioremap_resource_byname(pdev, "irq");
+ if (IS_ERR(host->sdirq))
+ return PTR_ERR(host->sdirq);
+
+ ret = devm_request_irq(dev, host->irq, litex_mmc_interrupt, 0,
+ "litex-mmc", host->mmc);
+ if (ret < 0) {
+ dev_warn(dev, "IRQ request error %d, using polling\n", ret);
+ goto use_polling;
+ }
+
+ /* Clear & enable card-change interrupts */
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING, SDIRQ_CARD_DETECT);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE, SDIRQ_CARD_DETECT);
+
+ return 0;
+
+use_polling:
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ return 0;
+}
+
+static void litex_mmc_free_host_wrapper(void *mmc)
+{
+ mmc_free_host(mmc);
+}
+
+static int litex_mmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct litex_mmc_host *host;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ int ret;
+
+ /*
+ * NOTE: defaults to max_[req,seg]_size=PAGE_SIZE, max_blk_size=512,
+ * and max_blk_count accordingly set to 8;
+ * If for some reason we need to modify max_blk_count, we must also
+ * re-calculate `max_[req,seg]_size = max_blk_size * max_blk_count;`
+ */
+ mmc = mmc_alloc_host(sizeof(struct litex_mmc_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, litex_mmc_free_host_wrapper, mmc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Can't register mmc_free_host action\n");
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ /* Initialize clock source */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "can't get clock\n");
+ host->ref_clk = clk_get_rate(clk);
+ host->sd_clk = 0;
+
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST inject
+ * a SET_BUS_WIDTH (acmd6) before the very first data transfer, earlier
+ * than when the mmc subsystem would normally get around to it!
+ */
+ host->is_bus_width_set = false;
+ host->app_cmd = false;
+
+ /* LiteSDCard can support 64-bit DMA addressing */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ host->buf_size = mmc->max_req_size * 2;
+ host->buffer = dmam_alloc_coherent(dev, host->buf_size,
+ &host->dma, GFP_KERNEL);
+ if (host->buffer == NULL)
+ return -ENOMEM;
+
+ host->sdphy = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(host->sdphy))
+ return PTR_ERR(host->sdphy);
+
+ host->sdcore = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(host->sdcore))
+ return PTR_ERR(host->sdcore);
+
+ host->sdreader = devm_platform_ioremap_resource_byname(pdev, "reader");
+ if (IS_ERR(host->sdreader))
+ return PTR_ERR(host->sdreader);
+
+ host->sdwriter = devm_platform_ioremap_resource_byname(pdev, "writer");
+ if (IS_ERR(host->sdwriter))
+ return PTR_ERR(host->sdwriter);
+
+ /* Ensure DMA bus masters are disabled */
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+
+ init_completion(&host->cmd_done);
+ ret = litex_mmc_irq_init(pdev, host);
+ if (ret)
+ return ret;
+
+ mmc->ops = &litex_mmc_ops;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret || mmc->ocr_avail == 0) {
+ dev_warn(dev, "can't get voltage, defaulting to 3.3V\n");
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+
+ /*
+ * Set default sd_clk frequency range based on empirical observations
+ * of LiteSDCard gateware behavior on typical SDCard media
+ */
+ mmc->f_min = 12.5e6;
+ mmc->f_max = 50e6;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ /* Force 4-bit bus_width (only width supported by hardware) */
+ mmc->caps &= ~MMC_CAP_8_BIT_DATA;
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ /* Set default capabilities */
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
+ MMC_CAP_DRIVER_TYPE_D |
+ MMC_CAP_CMD23;
+ mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT |
+ MMC_CAP2_NO_SDIO |
+ MMC_CAP2_NO_MMC;
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "LiteX MMC controller initialized.\n");
+ return 0;
+}
+
+static int litex_mmc_remove(struct platform_device *pdev)
+{
+ struct litex_mmc_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+ return 0;
+}
+
+static const struct of_device_id litex_match[] = {
+ { .compatible = "litex,mmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, litex_match);
+
+static struct platform_driver litex_mmc_driver = {
+ .probe = litex_mmc_probe,
+ .remove = litex_mmc_remove,
+ .driver = {
+ .name = "litex-mmc",
+ .of_match_table = litex_match,
+ },
+};
+module_platform_driver(litex_mmc_driver);
+
+MODULE_DESCRIPTION("LiteX SDCard driver");
+MODULE_AUTHOR("Antmicro <contact@antmicro.com>");
+MODULE_AUTHOR("Kamil Rakoczy <krakoczy@antmicro.com>");
+MODULE_AUTHOR("Maciej Dudek <mdudek@internships.antmicro.com>");
+MODULE_AUTHOR("Paul Mackerras <paulus@ozlabs.org>");
+MODULE_AUTHOR("Gabriel Somlo <gsomlo@gmail.com>");
+MODULE_LICENSE("GPL v2");
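
Editor's sketch (not part of the patch): one detail of the new litex_mmc probe worth calling out is the devm_add_action_or_reset() hook that ties mmc_free_host() to device teardown, so the later error returns need no manual cleanup. A minimal, hedged sketch of that pattern (helper names invented):

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/mmc/host.h>

	static void example_free_host(void *data)
	{
		mmc_free_host(data);
	}

	static int example_alloc_host(struct device *dev, struct mmc_host **out)
	{
		struct mmc_host *mmc = mmc_alloc_host(0, dev);
		int ret;

		if (!mmc)
			return -ENOMEM;

		/* Runs example_free_host(mmc) on probe failure or driver unbind. */
		ret = devm_add_action_or_reset(dev, example_free_host, mmc);
		if (ret)
			return ret;

		*out = mmc;
		return 0;
	}
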
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 8f36536cb1b6..58ab9d90bc8b 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -173,6 +173,8 @@ struct meson_host {
int irq;
bool vqmmc_enabled;
+ bool needs_pre_post_req;
+
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
struct meson_host *host = mmc_priv(mmc);
host->cmd = NULL;
+ if (host->needs_pre_post_req)
+ meson_mmc_post_req(mmc, mrq, 0);
mmc_request_done(host->mmc, mrq);
}
@@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_host *host = mmc_priv(mmc);
- bool needs_pre_post_req = mrq->data &&
+ host->needs_pre_post_req = mrq->data &&
!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
/*
@@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
}
- if (needs_pre_post_req) {
+ if (host->needs_pre_post_req) {
meson_mmc_get_transfer_mode(mmc, mrq);
if (!meson_mmc_desc_chain_mode(mrq->data))
- needs_pre_post_req = false;
+ host->needs_pre_post_req = false;
}
- if (needs_pre_post_req)
+ if (host->needs_pre_post_req)
meson_mmc_pre_req(mmc, mrq);
/* Stop execution */
writel(0, host->regs + SD_EMMC_START);
meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
- if (needs_pre_post_req)
- meson_mmc_post_req(mmc, mrq, 0);
}
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a576181e9db0..106dd204b1a7 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1489,7 +1489,7 @@ nomem:
}
-static int mmc_spi_remove(struct spi_device *spi)
+static void mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
struct mmc_spi_host *host = mmc_priv(mmc);
@@ -1507,7 +1507,6 @@ static int mmc_spi_remove(struct spi_device *spi)
spi->max_speed_hz = mmc->f_max;
mmc_spi_put_pdata(spi);
mmc_free_host(mmc);
- return 0;
}
static const struct spi_device_id mmc_spi_dev_ids[] = {
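
Editor's sketch (not part of the patch): the mmc_spi hunk above, and the mchp23k256, mchp48l640, mtd_dataflash and sst25l hunks in the mtd part of this series, are the same mechanical conversion — spi_driver ->remove() now returns void, so drivers drop the trailing "return 0;". A sketch of the resulting shape, with placeholder names:

	#include <linux/module.h>
	#include <linux/spi/spi.h>

	static int example_spi_probe(struct spi_device *spi)
	{
		return 0;	/* nothing to set up in this sketch */
	}

	static void example_spi_remove(struct spi_device *spi)
	{
		/* tear down driver state here; the callback no longer returns a value */
	}

	static struct spi_driver example_spi_driver = {
		.driver = {
			.name = "example-spi",
		},
		.probe = example_spi_probe,
		.remove = example_spi_remove,
	};
	module_spi_driver(example_spi_driver);

	MODULE_LICENSE("GPL");
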
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 16d1c7a43d33..b6eb75f4bbfc 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
+ mmc_free_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 65037e1d7723..e61b0b98065a 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1911,8 +1911,8 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
else
final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
- dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
- delay, len_final, final_phase);
+ dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ delay, len_final, final_phase);
delay_phase.maxlen = len_final;
delay_phase.start = start_final;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 9d2c600fd4ce..1685df00863b 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -212,7 +212,7 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { /* Sentinel. */ },
+ { /* Sentinel. */ }
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 58cfaffa3c2d..219029224727 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -38,10 +38,7 @@ struct realtek_pci_sdmmc {
bool double_clk;
bool eject;
bool initial_mode;
- int power_state;
-#define SDMMC_POWER_ON 1
-#define SDMMC_POWER_OFF 0
-
+ int prev_power_state;
int sg_count;
s32 cookie;
int cookie_sg_count;
@@ -905,7 +902,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
return err;
}
-static int sd_power_on(struct realtek_pci_sdmmc *host)
+static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
@@ -913,9 +910,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
u32 val;
u8 test_mode;
- if (host->power_state == SDMMC_POWER_ON)
+ if (host->prev_power_state == MMC_POWER_ON)
return 0;
+ if (host->prev_power_state == MMC_POWER_UP) {
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
+ goto finish;
+ }
+
msleep(100);
rtsx_pci_init_cmd(pcr);
@@ -936,10 +938,15 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (err < 0)
return err;
+ mdelay(1);
+
err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
if (err < 0)
return err;
+ /* send at least 74 clocks */
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+
if (PCI_PID(pcr) == PID_5261) {
/*
* If test mode is set switch to SD Express mandatorily,
@@ -964,7 +971,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
}
}
- host->power_state = SDMMC_POWER_ON;
+finish:
+ host->prev_power_state = power_mode;
return 0;
}
@@ -973,7 +981,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
rtsx_pci_init_cmd(pcr);
@@ -999,7 +1007,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
if (power_mode == MMC_POWER_OFF)
err = sd_power_off(host);
else
- err = sd_power_on(host);
+ err = sd_power_on(host, power_mode);
return err;
}
@@ -1482,10 +1490,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->pcr = pcr;
+ mmc->ios.power_delay_ms = 5;
host->mmc = mmc;
host->pdev = pdev;
host->cookie = -1;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d1a1c548c515..10fb4cb2c731 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -308,17 +308,15 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
static int sdhci_at91_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
int ret;
- match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index a593b1fbd69e..d9dc41143bb3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
+ int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
- of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+ }
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
@@ -930,7 +934,7 @@ static struct soc_device_attribute soc_tuning_erratum_type1[] = {
{ .family = "QorIQ T1040", },
{ .family = "QorIQ T2080", },
{ .family = "QorIQ LS1021A", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
@@ -940,7 +944,7 @@ static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1080A", },
{ .family = "QorIQ LS2080A", },
{ .family = "QorIQ LA1575A", },
- { },
+ { /* sentinel */ }
};
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
@@ -1312,21 +1316,21 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
static struct soc_device_attribute soc_incorrect_hostver[] = {
{ .family = "QorIQ T4240", .revision = "1.0", },
{ .family = "QorIQ T4240", .revision = "2.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 97035d77c18c..d09728c37d03 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -13,6 +13,7 @@
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/iopoll.h>
#include "sdhci.h"
#include "sdhci-pci.h"
#include "cqhci.h"
@@ -63,6 +64,7 @@
#define GLI_9750_MISC_RX_INV_OFF 0x0
#define GLI_9750_MISC_RX_INV_VALUE GLI_9750_MISC_RX_INV_OFF
#define GLI_9750_MISC_TX1_DLY_VALUE 0x5
+#define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26)
#define SDHCI_GLI_9750_TUNING_CONTROL 0x540
#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4)
@@ -137,6 +139,9 @@
#define PCI_GLI_9755_SerDes 0x70
#define PCI_GLI_9755_SCP_DIS BIT(19)
+#define PCI_GLI_9755_MISC 0x78
+#define PCI_GLI_9755_MISC_SSC_OFF BIT(26)
+
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
@@ -371,6 +376,19 @@ static void gl9750_set_pll(struct sdhci_host *host, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
+static bool gl9750_ssc_enable(struct sdhci_host *host)
+{
+ u32 misc;
+ u8 off;
+
+ gl9750_wt_on(host);
+ misc = sdhci_readl(host, SDHCI_GLI_9750_MISC);
+ off = FIELD_GET(SDHCI_GLI_9750_MISC_SSC_OFF, misc);
+ gl9750_wt_off(host);
+
+ return !off;
+}
+
static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -392,11 +410,31 @@ static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
static void gl9750_set_ssc_pll_205mhz(struct sdhci_host *host)
{
- /* set pll to 205MHz and enable ssc */
- gl9750_set_ssc(host, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 205MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xF, 0x5A1D);
gl9750_set_pll(host, 0x1, 0x246, 0x0);
}
+static void gl9750_set_ssc_pll_100mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 100MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x1);
+}
+
+static void gl9750_set_ssc_pll_50mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 50MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct mmc_ios *ios = &host->mmc->ios;
@@ -414,6 +452,10 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9750_set_ssc_pll_205mhz(host);
+ } else if (clock == 100000000) {
+ gl9750_set_ssc_pll_100mhz(host);
+ } else if (clock == 50000000) {
+ gl9750_set_ssc_pll_50mhz(host);
}
sdhci_enable_clk(host, clk);
@@ -514,6 +556,19 @@ static void gl9755_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
+static bool gl9755_ssc_enable(struct pci_dev *pdev)
+{
+ u32 misc;
+ u8 off;
+
+ gl9755_wt_on(pdev);
+ pci_read_config_dword(pdev, PCI_GLI_9755_MISC, &misc);
+ off = FIELD_GET(PCI_GLI_9755_MISC_SSC_OFF, misc);
+ gl9755_wt_off(pdev);
+
+ return !off;
+}
+
static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -535,11 +590,31 @@ static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
static void gl9755_set_ssc_pll_205mhz(struct pci_dev *pdev)
{
- /* set pll to 205MHz and enable ssc */
- gl9755_set_ssc(pdev, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 205MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xF, 0x5A1D);
gl9755_set_pll(pdev, 0x1, 0x246, 0x0);
}
+static void gl9755_set_ssc_pll_100mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 100MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x1);
+}
+
+static void gl9755_set_ssc_pll_50mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 50MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -560,6 +635,10 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9755_set_ssc_pll_205mhz(pdev);
+ } else if (clock == 100000000) {
+ gl9755_set_ssc_pll_100mhz(pdev);
+ } else if (clock == 50000000) {
+ gl9755_set_ssc_pll_50mhz(pdev);
}
sdhci_enable_clk(host, clk);
@@ -873,6 +952,47 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
+#ifdef CONFIG_PM
+static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+
+static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ clock |= SDHCI_CLOCK_PLL_EN;
+ clock &= ~SDHCI_CLOCK_INT_STABLE;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 150 ms */
+ if (read_poll_timeout(sdhci_readw, clock, (clock & SDHCI_CLOCK_INT_STABLE),
+ 1000, 150000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: PLL clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ }
+
+ clock |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -983,5 +1103,10 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.resume = sdhci_cqhci_gli_resume,
.suspend = sdhci_cqhci_gli_suspend,
#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = gl9763e_runtime_suspend,
+ .runtime_resume = gl9763e_runtime_resume,
+ .allow_runtime_pm = true,
+#endif
.add_host = gl9763e_add_host,
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 35ebba067e87..2d2d8260c681 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1618,7 +1618,6 @@ cleanup:
static int sdhci_tegra_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -1626,10 +1625,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
struct clk *clk;
int rc;
- match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
if (IS_ERR(host))
@@ -1673,6 +1671,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
/* HW busy detection is supported, but R1B responses are required. */
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+ /* GPIO CD can be set as a wakeup source */
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
+
tegra_sdhci_parse_dt(host);
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
@@ -1840,7 +1841,7 @@ static int sdhci_tegra_suspend(struct device *dev)
return ret;
}
- return 0;
+ return mmc_gpio_set_cd_wake(host->mmc, true);
}
static int sdhci_tegra_resume(struct device *dev)
@@ -1848,6 +1849,10 @@ static int sdhci_tegra_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
+ if (ret)
+ return ret;
+
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f654afbe8e83..e54fe24d47e7 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -514,26 +514,6 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
-static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = {
- .ops = &sdhci_j721e_8bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = {
- .pdata = &sdhci_am64_8bit_pdata,
- .flags = DLL_PRESENT | DLL_CALIB,
-};
-
-static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = {
- .ops = &sdhci_j721e_4bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = {
- .pdata = &sdhci_am64_4bit_pdata,
- .flags = IOMUX_PRESENT,
-};
-
static const struct soc_device_attribute sdhci_am654_devices[] = {
{ .family = "AM65X",
.revision = "SR1.0",
@@ -759,11 +739,15 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{
.compatible = "ti,am64-sdhci-8bit",
- .data = &sdhci_am64_8bit_drvdata,
+ .data = &sdhci_j721e_8bit_drvdata,
},
{
.compatible = "ti,am64-sdhci-4bit",
- .data = &sdhci_am64_4bit_drvdata,
+ .data = &sdhci_j721e_4bit_drvdata,
+ },
+ {
+ .compatible = "ti,am62-sdhci",
+ .data = &sdhci_j721e_4bit_drvdata,
},
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index bcc595c70a9f..5f9ebf045b1c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
@@ -518,8 +521,7 @@ static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
}
dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
- (best_freq / (1 << (clkdiv + 1))), clk,
- best_freq, clkdiv);
+ (best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);
clk_set_rate(host->clk, best_freq);
clkdiv = clkdiv << 16;
@@ -1009,8 +1011,8 @@ static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
*/
host->clkdiv_map = 0x3ff;
- host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
- host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
+ host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
+ host->mmc->f_min = f_min >> fls(host->clkdiv_map);
} else {
unsigned int clk = clk_get_rate(host->clk);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 2702736a1c57..c62afd212692 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1167,6 +1167,14 @@ static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
.can_calibrate = false,
};
+static const struct sunxi_mmc_cfg sun20i_d1_cfg = {
+ .idma_des_size_bits = 13,
+ .idma_des_shift = 2,
+ .can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
.idma_des_size_bits = 16,
.clk_delays = NULL,
@@ -1205,6 +1213,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg },
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
+ { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
{ .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index f936aad945ce..e754bb3f5c32 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -186,10 +186,6 @@ struct tmio_mmc_host {
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
- void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
- void (*hs400_downgrade)(struct tmio_mmc_host *host);
- void (*hs400_complete)(struct tmio_mmc_host *host);
-
const struct tmio_mmc_dma_ops *dma_ops;
};
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index cf10949fb0ac..163ac9df8cca 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -751,19 +751,16 @@ static int wmt_mci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(wmt_mci_dt_ids, &pdev->dev);
const struct wmt_mci_caps *wmt_caps;
int ret;
int regular_irq, dma_irq;
- if (!of_id || !of_id->data) {
+ wmt_caps = of_device_get_match_data(&pdev->dev);
+ if (!wmt_caps) {
dev_err(&pdev->dev, "Controller capabilities data missing\n");
return -EFAULT;
}
- wmt_caps = of_id->data;
-
if (!np) {
dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
return -EFAULT;
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index a8b31bddf14b..008df9d8898d 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -209,13 +209,11 @@ static int mchp23k256_probe(struct spi_device *spi)
return 0;
}
-static int mchp23k256_remove(struct spi_device *spi)
+static void mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static const struct of_device_id mchp23k256_of_table[] = {
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 231a10790196..a3fd426df74b 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -341,13 +341,11 @@ static int mchp48l640_probe(struct spi_device *spi)
return 0;
}
-static int mchp48l640_remove(struct spi_device *spi)
+static void mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static const struct of_device_id mchp48l640_of_table[] = {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 734878abaa23..134e27328597 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -916,7 +916,7 @@ static int dataflash_probe(struct spi_device *spi)
return status;
}
-static int dataflash_remove(struct spi_device *spi)
+static void dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
@@ -925,8 +925,6 @@ static int dataflash_remove(struct spi_device *spi)
WARN_ON(mtd_device_unregister(&flash->mtd));
kfree(flash);
-
- return 0;
}
static struct spi_driver dataflash_driver = {
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 6ed6c51fac69..d503821a3e60 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}
- if (erasesize)
- div_u64_rem(len, (uint32_t)erasesize, &rem);
-
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || rem) {
+ || erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+ parse_err("len is not multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
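
Editor's sketch (not part of the patch): the phram fix above is purely about ordering — the remainder check must come after erasesize has been validated as non-zero, so a zero divisor can never reach the division. A standalone sketch of the reordered checks (plain % stands in for the kernel's div_u64_rem() helper):

	#include <stdint.h>
	#include <stdio.h>

	static int validate_lengths(uint64_t len, uint64_t erasesize)
	{
		if (len == 0 || erasesize == 0 || erasesize > len ||
		    erasesize > UINT32_MAX)
			return -1;	/* illegal erasesize or len */

		if (len % erasesize)
			return -2;	/* len is not a multiple of erasesize */

		return 0;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       validate_lengths(1 << 20, 4096),	/* ok */
		       validate_lengths(1 << 20, 0),	/* zero erasesize rejected, no division */
		       validate_lengths(1 << 20, 4097));	/* not a multiple */
		return 0;
	}
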
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 7f124c1bfa40..8813994ce9f4 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -398,13 +398,11 @@ static int sst25l_probe(struct spi_device *spi)
return 0;
}
-static int sst25l_remove(struct spi_device *spi)
+static void sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 70f492dce158..eef87b28d6c8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.stride = 1;
config.read_only = true;
config.root_only = true;
+ config.ignore_wp = true;
config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
@@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
+ config.ignore_wp = true;
config.reg_read = reg_read;
config.size = size;
config.of_node = np;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index e86b04bc1d6b..dc7f1532a37f 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 20408b7db540..820e5dc3bc9b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -42,7 +42,7 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
- select OMAP_GPMC if ARCH_K3
+ depends on OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index f75929783b94..aee78f5f4f15 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
- if (!ret) {
+ if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1b64c5a5140d..ded4df473928 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
- return ret;
+ goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2414,6 +2414,7 @@ unmap:
this->bch = false;
+out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index efe0ffe4f1ab..9054559e52dd 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7c6efa3b6255..1a77542c6d67 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
- ret = qcom_nandc_alloc(nandc);
- if (ret)
- goto err_nandc_alloc;
-
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
@@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
@@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;
err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
-
return ret;
}
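
The qcom_nandc hunks move qcom_nandc_alloc() after the clock enables and reorder the error labels to match, so the unwind path releases resources in the exact reverse order of acquisition. A generic sketch of that goto-ladder pattern; the acquire/release helpers are placeholders standing in for the driver's clocks, allocations and setup:

#include <linux/platform_device.h>

/* Placeholder resources; in the real driver these are clocks, DMA, etc. */
static int acquire_a(struct platform_device *pdev) { return 0; }
static int acquire_b(struct platform_device *pdev) { return 0; }
static int acquire_c(struct platform_device *pdev) { return 0; }
static void release_a(struct platform_device *pdev) { }
static void release_b(struct platform_device *pdev) { }

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = acquire_a(pdev);
	if (ret)
		return ret;

	ret = acquire_b(pdev);
	if (ret)
		goto err_release_a;

	ret = acquire_c(pdev);
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(pdev);	/* reverse order of acquisition */
err_release_a:
	release_a(pdev);
	return ret;
}
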
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..52ce5162538a 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -6,7 +6,6 @@
* Based on Sharp's NAND driver sharp_sl.c
*/
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index 06a818cd2433..4311b89d8df0 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
struct mtd_partition *parts;
- int ret, i, numparts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
- pr_err("Error reading partition table header\n");
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
}
/* Ensure that # of partitions is less than the max we have allocated */
- numparts = le32_to_cpu(ptable->numparts);
- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
return PTR_ERR(ptable);
}
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
- for (i = 0; i < numparts; i++) {
+ for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
- parts[i].name = name;
- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
- parts[i].mask_flags = pentry->attr;
- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
+ j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
- le32_to_cpu(ptable->version), numparts);
+ le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
- while (--i >= 0)
- kfree(parts[i].name);
+ while (--j >= 0)
+ kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
+
+ kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
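
The qcomsmempart fix counts the named SMEM entries first, allocates exactly that many mtd_partition slots, and then fills them through a separate output index, so empty table entries no longer leave allocated-but-unused slots behind. A stripped-down sketch of the two-pass pattern, with invented types so it stands alone:

#include <linux/slab.h>
#include <linux/errno.h>

struct raw_entry {
	char name[16];		/* an empty name means "unused slot" */
	unsigned int offset;
	unsigned int length;
};

struct out_part {
	const char *name;
	unsigned int offset;
	unsigned int size;
};

static int build_parts(const struct raw_entry *tbl, int tbl_entries,
		       struct out_part **out)
{
	struct out_part *parts;
	int i, j, used = 0;

	/* Pass 1: count only the entries that will actually be emitted. */
	for (i = 0; i < tbl_entries; i++)
		if (tbl[i].name[0] != '\0')
			used++;

	parts = kcalloc(used, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* Pass 2: fill with a separate output index j. */
	for (i = 0, j = 0; i < tbl_entries; i++) {
		if (tbl[i].name[0] == '\0')
			continue;
		parts[j].name = tbl[i].name;
		parts[j].offset = tbl[i].offset;
		parts[j].size = tbl[i].length;
		j++;
	}

	*out = parts;
	return used;
}
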
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
index 5c0e0ec2e6d1..50f4f3484d42 100644
--- a/drivers/mtd/spi-nor/controllers/Kconfig
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -26,39 +26,3 @@ config SPI_NXP_SPIFI
SPIFI is a specialized controller for connecting serial SPI
Flash. Enable this option if you have a device with a SPIFI
controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
index e7abba491d98..6e2a1dc68466 100644
--- a/drivers/mtd/spi-nor/controllers/Makefile
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -2,6 +2,3 @@
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
deleted file mode 100644
index f2871179fd34..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel PCH/PCU SPI flash driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef INTEL_SPI_H
-#define INTEL_SPI_H
-
-#include <linux/platform_data/x86/intel-spi.h>
-
-struct intel_spi;
-struct resource;
-
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info);
-int intel_spi_remove(struct intel_spi *ispi);
-
-#endif /* INTEL_SPI_H */
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..c580acb8b1d3 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..a86b1f71762e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1994,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
@@ -2278,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
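
The bonding hunks turn agg_select_timer into an atomic_t because it is now armed and advanced from different contexts; bond_agg_timer_advance() performs a decrement-if-nonzero with atomic_cmpxchg() so the counter can never wrap below zero. A self-contained sketch of that helper outside the bonding code; the name decrement_if_nonzero is illustrative:

#include <linux/atomic.h>
#include <linux/types.h>

/* Atomically decrement @v unless it is already zero.
 * Returns true when this call brings the counter to zero.
 */
static bool decrement_if_nonzero(atomic_t *v)
{
	int val, nval;

	while (1) {
		val = atomic_read(v);
		if (!val)
			return false;	/* already expired, nothing to do */
		nval = val - 1;
		if (atomic_cmpxchg(v, val, nval) == val)
			break;		/* we won the race */
	}
	return nval == 0;
}
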
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ec498ce70f35..aebeb46e6fa6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
@@ -4133,9 +4132,7 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
fallthrough;
case SIOCGHWTSTAMP:
- rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
- rcu_read_unlock();
if (!real_dev)
return -EOPNOTSUPP;
@@ -5382,9 +5379,7 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
struct net_device *real_dev;
struct phy_device *phydev;
- rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
- rcu_read_unlock();
if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 0bff1884d5cc..74d7fcbfd065 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -296,6 +296,7 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index fccdff8b1f0f..23fc09a7e10f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -21,7 +21,7 @@
* Below is some version info we got:
* SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
* Filter? connected? Passive detection ption in MB Supported?
- * MCF5441X FlexCAN2 ? no yes no no yes no 16
+ * MCF5441X FlexCAN2 ? no yes no no no no 16
* MX25 FlexCAN2 03.00.00.00 no no no no no no 64
* MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
* MX35 FlexCAN2 03.00.00.00 no no no no no no 64
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 5b47cd867783..1a4b56f6fa8c 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -336,6 +336,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +349,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
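
The m_can hunks guard the FIFO accessors against a zero element count before calling into the underlying read_fifo/write_fifo ops, since regmap-backed implementations reject zero-length bulk transfers. A minimal sketch of the same guard around a regmap bulk read; the function name is illustrative:

#include <linux/regmap.h>

static int example_fifo_read(struct regmap *regmap, unsigned int addr,
			     u32 *val, size_t val_count)
{
	/* The regmap bulk helpers treat a zero-length transfer as an error,
	 * so report "nothing to read" as trivially successful instead.
	 */
	if (val_count == 0)
		return 0;

	return regmap_bulk_read(regmap, addr, val, val_count);
}
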
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 04687b15b250..41645a24384c 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -388,7 +388,7 @@ out_power:
return ret;
}
-static int tcan4x5x_can_remove(struct spi_device *spi)
+static void tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
@@ -397,8 +397,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
tcan4x5x_power_enable(priv->power, 0);
m_can_class_free_dev(priv->cdev.net);
-
- return 0;
}
static const struct of_device_id tcan4x5x_of_match[] = {
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..26e212b8ca7a 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b7dc1c32875f..acd74725831f 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1715,15 +1715,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ devm_can_led_init(ndev);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev,
"register_candev() failed, error %d\n", err);
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
return 0;
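
The rcar_canfd hunk moves spin_lock_init(), the LED setup and the gpriv->ch[] assignment ahead of register_candev(), because the device can be opened and its interrupt handler can run as soon as registration succeeds. A generic sketch of that init-before-publish rule, with invented names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_chan {
	spinlock_t tx_lock;
	bool ready;
};

static int example_register(struct example_chan *ch,
			    int (*do_register)(struct example_chan *))
{
	/* Everything the IRQ and open paths touch must be valid before
	 * registration makes the device visible to the rest of the system.
	 */
	spin_lock_init(&ch->tx_lock);
	ch->ready = true;

	return do_register(ch);	/* device may be used immediately after this */
}
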
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index cfcc14fe3e42..664b8f14d7b0 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -948,7 +948,7 @@ static int hi3110_can_probe(struct spi_device *spi)
return dev_err_probe(dev, ret, "Probe failed\n");
}
-static int hi3110_can_remove(struct spi_device *spi)
+static void hi3110_can_remove(struct spi_device *spi)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -960,8 +960,6 @@ static int hi3110_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused hi3110_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 025e07cb7439..d23edaf22420 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1427,7 +1427,7 @@ out_free:
return ret;
}
-static int mcp251x_can_remove(struct spi_device *spi)
+static void mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1442,8 +1442,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused mcp251x_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index b5986df6eca0..65c9b31666a6 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1966,7 +1966,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
return err;
}
-static int mcp251xfd_remove(struct spi_device *spi)
+static void mcp251xfd_remove(struct spi_device *spi)
{
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
@@ -1975,8 +1975,6 @@ static int mcp251xfd_remove(struct spi_device *spi)
mcp251xfd_unregister(priv);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
-
- return 0;
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2ed2370a3166..2d73ebbf3836 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..e5033cb5e695 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -373,8 +373,6 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -384,6 +382,10 @@ struct es58x_operators {
* in RX branches.
* @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ * stack big kernel lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
@@ -406,7 +408,6 @@ struct es58x_device {
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
u64 ktime_req_ns;
s64 realtime_diff_ns;
@@ -415,6 +416,7 @@ struct es58x_device {
u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
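
The es58x hunks replace the atomic opened_channel_cnt with a plain u8 and document why that is safe: ndo_open() and ndo_stop() always run with the rtnl lock held, so the counter is already serialized. A hedged sketch of that pattern with ASSERT_RTNL() making the locking assumption explicit; the structure and function names here are invented:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct example_usb_dev {
	u8 opened_channel_cnt;	/* protected by rtnl_lock, no atomics needed */
};

static int example_ndo_open(struct example_usb_dev *dev)
{
	ASSERT_RTNL();		/* ndo_open() runs under rtnl_lock */

	if (!dev->opened_channel_cnt) {
		/* first channel: allocate the shared RX URBs here */
	}
	dev->opened_channel_cnt++;
	return 0;
}

static int example_ndo_close(struct example_usb_dev *dev)
{
	ASSERT_RTNL();		/* ndo_stop() runs under rtnl_lock too */

	dev->opened_channel_cnt--;
	if (!dev->opened_channel_cnt) {
		/* last channel: free the shared RX URBs here */
	}
	return 0;
}
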
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b487e3fe770a..d35749fad1ef 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -191,8 +191,8 @@ struct gs_can {
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+ u8 active_channels;
};
/* 'allocate' a tx context.
@@ -589,7 +589,7 @@ static int gs_can_open(struct net_device *netdev)
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
@@ -690,6 +690,7 @@ static int gs_can_open(struct net_device *netdev)
dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -705,7 +706,8 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+ if (!parent->active_channels)
usb_kill_anchored_urbs(&parent->rx_submitted);
/* Stop sending URBs */
@@ -984,8 +986,6 @@ static int gs_usb_probe(struct usb_interface *intf,
init_usb_anchor(&dev->rx_submitted);
- atomic_set(&dev->active_channels, 0);
-
usb_set_intfdata(intf, dev);
dev->udev = interface_to_usbdev(intf);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..0029d279616f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
Ethernet switch chips.
@@ -81,6 +82,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2b88f03e5252..0e54b2a0c211 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -314,7 +314,7 @@ static int b53_spi_probe(struct spi_device *spi)
return 0;
}
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
@@ -322,8 +322,6 @@ static int b53_spi_remove(struct spi_device *spi)
b53_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 33499fcd8848..6afb5db8244c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
@@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
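
The bcm_sf2 hunks, like the gswip, mv88e6xxx and felix hunks further down, stop using devm_mdiobus_alloc() for a bus whose unregistration happens on a different path than the devres release; the managed buffer could otherwise be freed while the bus was still registered, or freed twice. The replacement is the explicit alloc/register/unregister/free lifecycle sketched below; names and the stub accessors are illustrative:

#include <linux/phy.h>

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;		/* stand-in for real register access */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;
}

static int example_mdio_setup(struct device *parent, struct mii_bus **out)
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();			/* not devm_: lifetime is explicit */
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	bus->parent = parent;
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-mdio");

	err = mdiobus_register(bus);
	if (err) {
		mdiobus_free(bus);		/* undo the alloc on failure */
		return err;
	}

	*out = bus;
	return 0;
}

static void example_mdio_teardown(struct mii_bus *bus)
{
	mdiobus_unregister(bus);		/* always unregister before freeing */
	mdiobus_free(bus);
}
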
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d55784d19fa4..3969d89fa4db 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,6 +10,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
@@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 46ed953e787e..8a7a8093a156 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -2145,8 +2150,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2170,6 +2177,7 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index 866767b70d65..5f8d94aee774 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -87,7 +87,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return 0;
}
-static int ksz8795_spi_remove(struct spi_device *spi)
+static void ksz8795_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -95,8 +95,6 @@ static int ksz8795_spi_remove(struct spi_device *spi)
ksz_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void ksz8795_spi_shutdown(struct spi_device *spi)
@@ -124,12 +122,23 @@ static const struct of_device_id ksz8795_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+static const struct spi_device_id ksz8795_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
static struct spi_driver ksz8795_spi_driver = {
.driver = {
.name = "ksz8795-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz8795_dt_ids),
},
+ .id_table = ksz8795_spi_ids,
.probe = ksz8795_spi_probe,
.remove = ksz8795_spi_remove,
.shutdown = ksz8795_spi_shutdown,
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index e3cb0e6c9f6f..87ca464dad32 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -65,7 +65,7 @@ static int ksz9477_spi_probe(struct spi_device *spi)
return 0;
}
-static int ksz9477_spi_remove(struct spi_device *spi)
+static void ksz9477_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -73,8 +73,6 @@ static int ksz9477_spi_remove(struct spi_device *spi)
ksz_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void ksz9477_spi_shutdown(struct spi_device *spi)
@@ -98,12 +96,24 @@ static const struct of_device_id ksz9477_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static const struct spi_device_id ksz9477_spi_ids[] = {
+ { "ksz9477" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
static struct spi_driver ksz9477_spi_driver = {
.driver = {
.name = "ksz9477-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
+ .id_table = ksz9477_spi_ids,
.probe = ksz9477_spi_probe,
.remove = ksz9477_spi_remove,
.shutdown = ksz9477_spi_shutdown,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 55dbda04ea62..243f8ad6d06e 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
- int i;
+ int i, j;
if (!dsa_is_user_port(ds, port))
return;
@@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
+ continue;
- if (other_p->stp_state == BR_STATE_FORWARDING &&
- p->stp_state == BR_STATE_FORWARDING) {
+ if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
+ /* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b82512e5b33b..a251bc55727f 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
@@ -2936,7 +2936,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set_port_modes(mask);
- if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
!phy_interface_mode_is_8023z(state->interface)) {
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 58ca684d73f7..ab1676553714 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -3399,7 +3406,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@@ -3424,14 +3431,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
- return err;
+ goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
+ goto out;
}
if (external)
@@ -3440,21 +3447,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
list_add(&mdio_bus->list, &chip->mdios);
return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
+ mdiobus_free(bus);
}
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index bf8d38239e7e..33f0ceae381d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 8c1c9da61602..f2f1608a476c 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
}
/* Needed in order to initialize the bus mutex lock */
- rc = of_mdiobus_register(bus, NULL);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..c39de2a4c1fe 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b513713be610..c2a47c6693b8 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3346,18 +3346,16 @@ static int sja1105_probe(struct spi_device *spi)
return dsa_register_switch(priv->ds);
}
-static int sja1105_remove(struct spi_device *spi)
+static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void sja1105_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 645398901e05..3110895358d8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -159,18 +159,16 @@ static int vsc73xx_spi_probe(struct spi_device *spi)
return vsc73xx_probe(&vsc_spi->vsc);
}
-static int vsc73xx_spi_remove(struct spi_device *spi)
+static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
- return 0;
+ return;
vsc73xx_remove(&vsc_spi->vsc);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 481f1df3106c..8aec5d9fbfef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2278,6 +2278,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2409,8 +2410,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
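
The typhoon hunk, like the etherh and declance ones that follow, assembles the MAC address in a local buffer and installs it with eth_hw_addr_set() instead of writing to dev->dev_addr directly, which is now const. A minimal sketch of that conversion; the source of the address bytes is made up here:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_set_mac(struct net_device *dev, const u8 *rom)
{
	u8 addr[ETH_ALEN] __aligned(2);
	int i;

	/* Assemble the address locally ... */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = rom[i];

	/* ... then hand it to the core; dev->dev_addr is read-only now. */
	eth_hw_addr_set(dev, addr);
}
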
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..e7b879123bb1 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e320cccba61a..90cd7bdf06f5 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -405,12 +405,12 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
dev_err(&pdev->dev, "no IRQ specified?\n");
return -ENXIO;
}
@@ -433,7 +433,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
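
The mcf8390 hunk switches from platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(), which also works for device-tree and ACPI interrupt providers and returns a ready-to-use Linux IRQ number, or a negative errno on failure. A short sketch of the pattern:

#include <linux/platform_device.h>
#include <linux/netdevice.h>

static int example_get_irq(struct platform_device *pdev, struct net_device *dev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);	/* negative errno if missing */
	if (irq < 0)
		return irq;

	dev->irq = irq;				/* already a usable IRQ number */
	return 0;
}
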
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 492ac383f16d..a3593290886f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
+ /* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -2550,6 +2552,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2579,8 +2589,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index efdcf484a510..2af3da4b2d05 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
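
The aq_filters hunks replace an open-coded loop over hweight_long() with bitmap_weight() and a memset of the VLAN bitmap with bitmap_zero(), the canonical helpers for fixed-size bitmaps. A minimal sketch of both, using the same VLAN_N_VID bit count as the driver:

#include <linux/bitmap.h>
#include <linux/if_vlan.h>

static int count_active_vlans(const unsigned long *active_vlans)
{
	/* Number of set bits across the whole VLAN ID space. */
	return bitmap_weight(active_vlans, VLAN_N_VID);
}

static void clear_active_vlans(unsigned long *active_vlans)
{
	/* Clears exactly VLAN_N_VID bits, independent of array padding. */
	bitmap_zero(active_vlans, VLAN_N_VID);
}
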
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 9acf589b1178..87f40c2ba904 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
+ const char *name = "Synopsys MII Bus";
struct mii_bus *bus;
int error;
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus";
+ bus->name = name;
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
- "cannot register MDIO bus %s\n", bus->name);
+ "cannot register MDIO bus %s\n", name);
}
return 0;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e7a9f9863258..bf70481bb1ca 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -1102,7 +1102,7 @@ err:
return ret;
}
-static int ax88796c_remove(struct spi_device *spi)
+static void ax88796c_remove(struct spi_device *spi)
{
struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
struct net_device *ndev = ax_local->ndev;
@@ -1112,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi)
netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
dev_driver_string(&spi->dev),
dev_name(&spi->dev));
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ad3fc72e74e..a89b93cb4e26 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
+ mutex_unlock(&alx->mtx);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..f50604f3e541 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e20aafeb4ca9..b97ed9b5f685 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a19dd6797070..2209d99b3404 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-int bnx2x_init_firmware(struct bnx2x *bp);
-void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8d36ebbf08e1..5729a5ab059d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2364,24 +2364,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build my FW version dword */
- u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
- (bp->fw_rev << 16) + (bp->fw_eng << 24);
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
/* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
- loaded_fw, my_fw);
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
if (print_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
- BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
- loaded_fw, my_fw);
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
return -EBUSY;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 774c1f1a57c3..c19b072f3a23 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
@@ -12316,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
- if (IS_PF(bp)) {
- rc = bnx2x_init_firmware(bp);
-
- if (rc) {
- bnx2x_free_mem_bp(bp);
- return rc;
- }
- }
-
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12337,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13406,7 +13399,7 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -13506,7 +13499,7 @@ request_firmware_exit:
return rc;
}
-void bnx2x_release_firmware(struct bnx2x *bp)
+static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14023,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4f94136a011a..b1c98d1408b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4747,8 +4747,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
@@ -7787,6 +7789,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8639,6 +8654,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8648,7 +8666,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8659,6 +8677,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9850,8 +9869,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10330,13 +10349,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10344,7 +10365,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -10354,9 +10375,10 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10772,7 +10794,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -10849,9 +10871,10 @@ skip_uc:
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 440dfeb4948b..666fc1e7a7d2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1921,6 +1921,7 @@ struct bnxt {
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4da31b1b84f9..f6e21fac0e69 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
}
}
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
u32 installed = 0;
- u16 flags;
u8 target;
int rc;
@@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
hwrm_req_drop(bp, query_req);
return rc;
}
- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
@@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
flags = le16_to_cpu(query_resp->status_flags);
- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
continue;
- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
- !strncmp(query_resp->active_ver, query_resp->install_ver,
- sizeof(query_resp->active_ver)))
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
if (!rc && !installed) {
- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
- rc = -ENOENT;
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
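The macros added in the hunk above decode the two firmware status bits (INSTALL, ACTIVE) into four named states, and the per-target loop now picks ACTIVATE or DEACTIVATE from that state instead of only checking the INSTALL bit. A standalone sketch of the same two-bit decode follows; the numeric flag values and the state_name() helper are illustrative stand-ins, not the driver's FW_LIVEPATCH_QUERY_RESP_* constants.

#include <stdio.h>

/* Illustrative flag values; the driver takes these from bnxt_hsi.h. */
#define FLAGS_INSTALL 0x1
#define FLAGS_ACTIVE  0x2

#define LIVEPATCH_NOT_INSTALLED 0
#define LIVEPATCH_INSTALLED     FLAGS_INSTALL
#define LIVEPATCH_REMOVED       FLAGS_ACTIVE
#define LIVEPATCH_MASK          (FLAGS_INSTALL | FLAGS_ACTIVE)
#define LIVEPATCH_ACTIVATED     LIVEPATCH_MASK
#define LIVEPATCH_STATE(flags)  ((flags) & LIVEPATCH_MASK)

static const char *state_name(unsigned int flags)
{
	switch (LIVEPATCH_STATE(flags)) {
	case LIVEPATCH_NOT_INSTALLED: return "not installed (skip target)";
	case LIVEPATCH_INSTALLED:     return "installed, not active (activate)";
	case LIVEPATCH_REMOVED:       return "active but removed from NVM (deactivate)";
	case LIVEPATCH_ACTIVATED:     return "installed and active (nothing to do)";
	}
	return "unreachable";
}

int main(void)
{
	for (unsigned int flags = 0; flags <= LIVEPATCH_MASK; flags++)
		printf("flags=0x%x -> %s\n", flags, state_name(flags));
	return 0;
}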
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 003330e8cd58..8aaa2335f848 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -25,6 +25,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
@@ -1969,6 +1970,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
@@ -3454,7 +3458,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3548,9 +3552,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3560,6 +3567,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -3585,7 +3593,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 566c9487ef55..b01d42928a53 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i), req_type,
+ hwrm_total_timeout(i) + j, req_type,
le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index d52bd2d63aec..c98032e38188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
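The bnxt_hwrm.c and bnxt_hwrm.h hunks above replace the old fixed poll (usleep_range(1, 5) per iteration against a 150 usec budget) with a hybrid loop: about ten 1 usec busy-waits, then 20-30 usec sleeps accounted as 20 usec each, against a 50 ms budget, so the worst case is roughly 10 usec + 2,500 sleeps of up to 30 usec, i.e. about 75 ms of wall time for ~50 ms of accounted delay. A user-space sketch of the same loop shape follows; usleep() stands in for udelay()/usleep_range(), and the simulated valid bit is an assumption for the demo, not driver code.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define VALID_BIT_DELAY_USEC 50000	/* matches the new budget above */

/* Stand-in for the DMA'd valid byte: pretend it appears after ~1 ms. */
static unsigned int accounted_us;
static bool valid_bit_set(void)
{
	return accounted_us >= 1000;
}

/* Same loop shape as __hwrm_send(): short busy-wait, then coarse sleeps. */
static bool poll_valid_bit(void)
{
	unsigned int j;

	for (j = 0; j < VALID_BIT_DELAY_USEC; ) {
		if (valid_bit_set())
			return true;
		if (j < 10) {
			usleep(1);	/* udelay(1) in the driver */
			j++;
		} else {
			usleep(25);	/* usleep_range(20, 30), accounted as 20 usec */
			j += 20;
		}
		accounted_us = j;
	}
	return false;
}

int main(void)
{
	printf("valid bit %s after %u accounted usec\n",
	       poll_valid_bit() ? "seen" : "timed out", accounted_us);
	return 0;
}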
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 87f1056e29ff..2da804f84b48 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2287,8 +2287,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (rx_csum) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index e31a5a397f11..f55d9d9c01a8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -40,6 +40,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..a1a38456c9a3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a363da928e8b..d13f06cf0308 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1573,7 +1573,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
+ /* RSR bits only seem to propagate to raise interrupts when
+ * interrupts are enabled at the time, so if bits are already
+ * set due to packets received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here. This has been seen to happen
+ * around 30% of the time under heavy network load.
+ */
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1581,6 +1588,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
napi_reschedule(napi);
} else {
queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* In rare cases, packets could have been received in
+ * the window between the check above and re-enabling
+ * interrupts. Therefore, a double-check is required
+ * to avoid losing a wakeup. This can potentially race
+ * with the interrupt handler doing the same actions
+ * if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
+ napi_schedule(napi);
+ }
}
}
@@ -4712,7 +4735,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
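The macb_poll() hunks above close a lost-wakeup window: RSR bits that latch while receive interrupts are masked do not raise a new interrupt once the mask is lifted, so the poll path re-reads RSR after writing IER and falls back to NAPI scheduling if work slipped in between. Below is a user-space analogue of that check / enable / re-check ordering, with an atomic 'pending' flag standing in for RSR and a second flag for the interrupt mask; the names are illustrative, not taken from the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;	/* stands in for latched RSR bits */
static atomic_bool irq_enabled;	/* stands in for IER/IDR state */

/* Device side: latch work; an interrupt only fires if currently enabled. */
static bool raise_event(void)
{
	atomic_store(&pending, true);
	return atomic_load(&irq_enabled);
}

/* Poll-complete path, mirroring the ordering in macb_poll(). */
static void poll_complete(void)
{
	if (atomic_load(&pending)) {
		/* work latched while masked: keep polling, don't enable */
		return;
	}

	atomic_store(&irq_enabled, true);	/* queue_writel(IER, ...) */

	/* Re-check: an event latched just before the enable above raised
	 * no interrupt, so it must be picked up here or it is lost.
	 */
	if (atomic_load(&pending)) {
		atomic_store(&irq_enabled, false);	/* mask again */
		/* napi_schedule() equivalent: go back to polling */
	}
}

int main(void)
{
	atomic_store(&irq_enabled, false);	/* IRQs masked while polling */
	raise_event();				/* latched, no interrupt fired */
	poll_complete();			/* first check catches it and keeps polling */
	printf("pending=%d irq_enabled=%d\n",
	       (int)atomic_load(&pending), (int)atomic_load(&irq_enabled));
	return 0;
}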
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index da41eee2f25c..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 691605c15265..d5356db7539a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+	/* Release the PHY lock to allow ftgmac100_reset() to acquire it,
+	 * keeping lock order consistent to prevent deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index e985ae008a97..0f90d2d5bb60 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+
+ unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
- unregister_netdev(net_dev);
-
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..cacd454ac696 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..be0bd4b44926 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -99,13 +99,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +893,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
		 * If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ff756265d58f..9a2c16d69e2c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev,
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 5f5d4f7aa813..160735484465 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -843,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 2ad7f57f7e5b..f7621ab672b9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
- u32 tail, head;
+ int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index f7f65c4bf993..54e51c8221b8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -766,9 +766,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -811,7 +811,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9ddcc497f48e..e4e98aa7745f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -608,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
+ skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..8c939628e2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
if (err)
return err;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7df87610ad96..21442a9bb996 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2043,8 +2043,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 59536bd5cab1..b423e94956f1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
@@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
@@ -1493,6 +1491,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
@@ -2208,6 +2213,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
}
/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+}
+
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2313,6 +2331,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_init_done(adapter);
+
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
rc = init_crq_queue(adapter);
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2456,7 +2476,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- reinit_completion(&adapter->init_done);
+ reinit_init_done(adapter);
+
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -2597,22 +2618,82 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_adapter *adapter;
- bool saved_state = false;
+ unsigned int timeout = 5000;
struct ibmvnic_rwi *tmprwi;
+ bool saved_state = false;
struct ibmvnic_rwi *rwi;
unsigned long flags;
+ struct device *dev;
+ bool need_reset;
+ int num_fails = 0;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ dev = &adapter->vdev->dev;
- if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+ * or if another reset is in progress, defer work for now. If probe
+ * eventually fails it will flush and terminate our work.
+ *
+ * Three possibilities here:
+	 * 1. Adapter being removed - just return
+ * 2. Timed out on probe or another reset in progress - delay the work
+ * 3. Completed probe - perform any resets in queue
+ */
+ if (adapter->state == VNIC_PROBING &&
+ !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+ dev_err(dev, "Reset thread timed out on probe");
queue_delayed_work(system_long_wq,
&adapter->ibmvnic_delayed_reset,
IBMVNIC_RESET_DELAY);
return;
}
+ /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
+ if (adapter->state == VNIC_REMOVING)
+ return;
+
+ /* ->rwi_list is stable now (no one else is removing entries) */
+
+ /* ibmvnic_probe() may have purged the reset queue after we were
+	 * scheduled to process a reset so there may be no resets to process.
+	 * Before setting the ->resetting bit though, we have to make sure
+	 * that there is in fact a reset to process. Otherwise we may race
+ * with ibmvnic_open() and end up leaving the vnic down:
+ *
+ * __ibmvnic_reset() ibmvnic_open()
+ * ----------------- --------------
+ *
+ * set ->resetting bit
+ * find ->resetting bit is set
+ * set ->state to IBMVNIC_OPEN (i.e
+ * assume reset will open device)
+ * return
+ * find reset queue empty
+ * return
+ *
+ * Neither performed vnic login/open and vnic stays down
+ *
+ * If we hold the lock and conditionally set the bit, either we
+ * or ibmvnic_open() will complete the open.
+ */
+ need_reset = false;
+ spin_lock(&adapter->rwi_lock);
+ if (!list_empty(&adapter->rwi_list)) {
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ } else {
+ need_reset = true;
+ }
+ }
+ spin_unlock(&adapter->rwi_lock);
+
+ if (!need_reset)
+ return;
+
rwi = get_next_rwi(adapter);
while (rwi) {
spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2655,11 +2736,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+ /* If auto-priority-failover is enabled we can get
+		 * back-to-back failovers during resets, resulting
+		 * in at least two failed resets (from high-priority
+		 * backing device to low-priority one and then back).
+		 * If resets continue to fail beyond that, give the
+ * adapter some time to settle down before retrying.
+ */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -2717,12 +2810,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
__ibmvnic_reset(&adapter->ibmvnic_reset);
}
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+ struct list_head *entry, *tmp_entry;
+
+ if (!list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+ list_del(entry);
+ kfree(list_entry(entry, struct ibmvnic_rwi, list));
+ }
+ }
+}
+
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
- struct list_head *entry, *tmp_entry;
- struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ struct ibmvnic_rwi *rwi, *tmp;
unsigned long flags;
int ret;
@@ -2741,13 +2845,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- if (adapter->state == VNIC_PROBING) {
- netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = -EAGAIN;
- ret = EAGAIN;
- goto err;
- }
-
list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2765,10 +2862,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
/* if we just received a transport event,
* flush reset queue and process this reset
*/
- if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
- list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
- list_del(entry);
- }
+ if (adapter->force_reset_recovery)
+ flush_reset_queue(adapter);
+
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -3844,11 +3940,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3909,44 +4019,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3954,16 +4065,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4357,118 +4473,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+	 * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4772,6 +4902,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4835,10 +4967,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5136,10 +5266,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5271,9 +5399,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
if (!adapter->init_done_rc)
adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
}
break;
@@ -5296,6 +5424,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
adapter->fw_done_rc = -EIO;
complete(&adapter->fw_done);
}
+
+ /* if we got here during crq-init, retry crq-init */
+ if (!completion_done(&adapter->init_done)) {
+ adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
+ }
+
if (!completion_done(&adapter->stats_done))
complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
@@ -5435,33 +5570,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
@@ -5624,10 +5747,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
adapter->from_passive_init = false;
- if (reset)
- reinit_completion(&adapter->init_done);
-
- adapter->init_done_rc = 0;
rc = ibmvnic_send_crq_init(adapter);
if (rc) {
dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5641,12 +5760,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
return adapter->init_done_rc;
}
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
+ dev_err(dev, "CRQ-init failed, passive-init\n");
return -EINVAL;
}
@@ -5686,6 +5807,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ unsigned long flags;
bool init_success;
int rc;
@@ -5730,6 +5852,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->rwi_lock);
spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
+ init_completion(&adapter->probe_done);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
init_completion(&adapter->reset_done);
@@ -5740,6 +5863,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_success = false;
do {
+ reinit_init_done(adapter);
+
+ /* clear any failovers we got in the previous pass
+ * since we are reinitializing the CRQ
+ */
+ adapter->failover_pending = false;
+
+ /* If we had already initialized CRQ, we may have one or
+ * more resets queued already. Discard those and release
+ * the CRQ before initializing the CRQ again.
+ */
+ release_crq_queue(adapter);
+
+ /* Since we are still in PROBING state, __ibmvnic_reset()
+ * will not access the ->rwi_list and since we released CRQ,
+	 * we won't get _new_ transport events. But there may be an
+	 * ongoing ibmvnic_reset() call. So serialize access to
+	 * rwi_list. If we win the race, ibmvnic_reset() could add
+	 * a reset after we purged but that's ok - we just may end
+ * up with an extra reset (i.e similar to having two or more
+ * resets in the queue at once).
+ * CHECK.
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
rc = init_crq_queue(adapter);
if (rc) {
dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5771,12 +5921,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto ibmvnic_register_fail;
- }
- dev_info(&dev->dev, "ibmvnic registered\n");
if (init_success) {
adapter->state = VNIC_PROBED;
@@ -5789,6 +5933,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto ibmvnic_register_fail;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ complete(&adapter->probe_done);
+
return 0;
ibmvnic_register_fail:
@@ -5803,6 +5957,17 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+
+ /* cleanup worker thread after releasing CRQ so we don't get
+ * transport events (i.e new work items for the worker thread).
+ */
+ adapter->state = VNIC_REMOVING;
+ complete(&adapter->probe_done);
+ flush_work(&adapter->ibmvnic_reset);
+ flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+ flush_reset_queue(adapter);
+
mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
@@ -5879,10 +6044,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc)
+ if (rc) {
netdev_err(netdev,
"H_VIOCTL initiated failover failed, rc %ld\n",
rc);
+ goto last_resort;
+ }
+
+ return count;
last_resort:
netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
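The __ibmvnic_reset() hunk earlier in this file only takes the ->resetting bit after confirming, under rwi_lock, that the reset queue is non-empty; otherwise ibmvnic_open() could see the bit set, assume the pending reset will finish opening the device, and return, while the reset thread finds an empty queue and also returns, leaving the vnic down. A user-space analogue of that 'check the queue under the lock, then claim the bit' ordering is sketched below; the pthread mutex, atomic flag and helper names are illustrative, and the driver's retry/deferral paths are omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rwi_lock = PTHREAD_MUTEX_INITIALIZER;
static int rwi_count;			/* stands in for adapter->rwi_list */
static atomic_bool resetting;		/* stands in for the ->resetting bit */

/* Reset worker: claim the resetting bit only if work is actually queued.
 * (The driver re-queues itself when the bit is already owned; that path
 * is left out here.)
 */
static bool reset_worker_should_run(void)
{
	bool need_reset = false;

	pthread_mutex_lock(&rwi_lock);
	if (rwi_count > 0 && !atomic_exchange(&resetting, true))
		need_reset = true;	/* we own the bit and have work */
	pthread_mutex_unlock(&rwi_lock);

	return need_reset;
}

/* Open path: if a reset owns the bit, it will finish bringing the device
 * up, so open can hand off instead of doing the work itself.
 */
static bool open_defers_to_reset(void)
{
	return atomic_load(&resetting);
}

int main(void)
{
	/* Empty queue: the worker never takes the bit, open does the open. */
	printf("worker=%d open_defers=%d\n",
	       reset_worker_should_run(), open_defers_to_reset());

	/* Queued reset: the worker takes the bit, open hands off to it. */
	rwi_count = 1;
	printf("worker=%d open_defers=%d\n",
	       reset_worker_should_run(), open_defers_to_reset());
	return 0;
}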
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4a8f36e0ab07..fa2d607a7b1b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -919,7 +919,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
@@ -931,6 +930,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_pool *tso_pool;
+ struct completion probe_done;
struct completion init_done;
int init_done_rc;
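The send_query_cap()/send_request_cap() hunks above stop bumping running_cap_crqs once per message; the expected total is published before the first CRQ is sent and a local cap_reqs countdown is kept only as a WARN_ON cross-check, so a response arriving mid-loop can no longer drive the counter to zero early (which is also what lets the tasklet drop its wait_capability re-loop, per the ibmvnic.h hunk just above). A compact sketch of that 'publish the total first, verify the local countdown' pattern follows; the count of 25 comes from the hunk, everything else is illustrative.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running_cap_crqs;

/* Response side: one completion per query; kicks the next step at zero. */
static void handle_response(void)
{
	if (atomic_fetch_sub(&running_cap_crqs, 1) == 1)
		printf("all responses in: send next protocol message\n");
}

/* Request side: publish the expected total before sending anything, so a
 * response arriving mid-loop can never drive the counter to zero early.
 */
static void send_queries(int expected)
{
	int cap_reqs = expected;

	atomic_store(&running_cap_crqs, cap_reqs);

	for (int i = 0; i < expected; i++) {
		/* ibmvnic_send_crq(...) would go here */
		cap_reqs--;
		handle_response();	/* responses may interleave with sends */
	}

	/* Keep at end to catch any discrepancy between expected and sent. */
	assert(cap_reqs == 0);
}

int main(void)
{
	send_queries(25);	/* QUERY_CAPABILITY count from the hunk above */
	return 0;
}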
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c3def0ee7788..8d06c9d8ff8b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -115,7 +115,8 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
- board_pch_tgp
+ board_pch_tgp,
+ board_pch_adp
};
struct e1000_ps_page {
@@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
+extern const struct e1000_info e1000_pch_adp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index bcf680e83811..13382df2f2ef 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -630,6 +630,7 @@ struct e1000_phy_info {
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
+ bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 5e4fc9b4e2ad..d60e2016d03c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
bool blocked = false;
int i = 0;
+ /* Check the PHY (LCD) reset flag */
+ if (hw->phy.reset_disable)
+ return true;
+
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 11000);
@@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
- e_dbg("NVM Checksum Invalid\n");
+ e_dbg("NVM Checksum valid bit not set\n");
- if (hw->mac.type < e1000_pch_cnp) {
+ if (hw->mac.type < e1000_pch_tgp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
@@ -6021,3 +6025,23 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_adp_info = {
+ .mac = e1000_pch_adp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..638a3ddd7ada 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -271,6 +271,7 @@
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 635a95927e93..c5bdef3ffe26 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
+ [board_pch_adp] = &e1000_pch_adp_info,
};
struct e1000_reg_info {
@@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@@ -6490,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -6984,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data |= I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Disable LCD reset */
+ hw->phy.reset_disable = true;
+ }
+
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
@@ -7007,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
/* Introduce S0ix implementation */
@@ -7017,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
if (rc)
return rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Unmask OEM Bits / Gig Disable / Restart AN (772_26[12] = 0) */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data &= ~I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Enable LCD reset */
+ hw->phy.reset_disable = false;
+ }
+
return e1000e_pm_thaw(dev);
}
@@ -7898,22 +7927,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
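
The suspend/resume hunks above gate the new OEM-bit handling on ME presence (FWSM valid) and an ADP-or-newer MAC, then set or clear bit 12 (MOEM) of PHY register 772_26 and toggle hw->phy.reset_disable. A minimal userspace sketch of that read-modify-write is shown below; struct fake_phy and its fields are hypothetical stand-ins for hw->phy state and the e1e_rphy()/e1e_wphy() accessors, so this is illustrative only, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I217_MEMPWR_MOEM 0x1000   /* bit 12, as defined in ich8lan.h above */

struct fake_phy {
	uint16_t mempwr;      /* stands in for PHY register 772_26 */
	bool reset_disable;   /* stands in for hw->phy.reset_disable */
};

/* suspend path: mask OEM Bits / Gig Disable / Restart AN, block LCD reset */
static void suspend_mask_oem_bits(struct fake_phy *phy)
{
	phy->mempwr |= I217_MEMPWR_MOEM;
	phy->reset_disable = true;
}

/* resume path: undo the masking and allow LCD reset again */
static void resume_unmask_oem_bits(struct fake_phy *phy)
{
	phy->mempwr &= ~I217_MEMPWR_MOEM;
	phy->reset_disable = false;
}

int main(void)
{
	struct fake_phy phy = { .mempwr = 0x0010 };

	suspend_mask_oem_bits(&phy);
	printf("suspended: mempwr=0x%04x reset_disable=%d\n", phy.mempwr, phy.reset_disable);
	resume_unmask_oem_bits(&phy);
	printf("resumed:   mempwr=0x%04x reset_disable=%d\n", phy.mempwr, phy.reset_disable);
	return 0;
}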
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..80c5cecaf2b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
@@ -174,7 +175,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -848,12 +848,12 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..9db5001297c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
vsi = pf->vsi[vf->lan_vsi_idx];
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
- dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
- vf->num_mdd_events,
- vf->num_invalid_msgs,
- vf->num_valid_msgs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+ vf->num_mdd_events);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2a3d8aef7f4e..31b03fe78d3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -196,10 +196,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -214,8 +210,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate the last queue in the pile for the FDIR VSI queue
+ * so that it does not fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -234,7 +243,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
@@ -257,7 +265,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -269,8 +277,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
@@ -772,9 +778,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
+ u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -10574,15 +10580,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for firmware update. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -10853,6 +10853,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -11792,7 +11795,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12213,6 +12215,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@@ -12595,7 +12599,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
@@ -13040,6 +13043,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
+ /* The VSI will be deleted in a moment, just return -EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -15930,8 +15937,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set the __I40E_IN_REMOVE
+ * flag; once they are set, i40e_rebuild should not be called as
+ * i40e_prep_for_reset always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16130,6 +16142,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
i40e_reset_and_rebuild(pf, false, false);
}
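
The i40e_main.c hunks above drop the search_hint heuristic and instead pin the FDIR VSI's single queue to the last entry of the qp_pile so that it cannot fragment the pile, leaving a plain linear scan for everything else. A compilable userspace sketch of that allocation policy is below; the simplified struct and the is_fdir flag are hypothetical stand-ins for the driver's pile and VSI-type check.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PILE_VALID_BIT 0x8000u

struct lump_tracking {
	uint16_t num_entries;
	uint16_t list[64];
};

static int get_lump(struct lump_tracking *pile, uint16_t needed, uint16_t id, int is_fdir)
{
	uint16_t i, j;

	if (!needed || needed > pile->num_entries)
		return -EINVAL;

	/* FDIR-style request: reserve the last slot so the pile stays compact */
	if (is_fdir) {
		uint16_t last = pile->num_entries - 1;

		if (pile->list[last] & PILE_VALID_BIT)
			return -ENOMEM;
		pile->list[last] = id | PILE_VALID_BIT;
		return last;
	}

	/* plain linear scan for a contiguous run of free entries */
	for (i = 0; i < pile->num_entries; ) {
		if (pile->list[i] & PILE_VALID_BIT) {
			i++;
			continue;
		}
		for (j = 0; (i + j) < pile->num_entries && j < needed; j++)
			if (pile->list[i + j] & PILE_VALID_BIT)
				break;
		if (j == needed) {
			for (j = 0; j < needed; j++)
				pile->list[i + j] = id | PILE_VALID_BIT;
			return i;
		}
		i += j + 1;
	}
	return -ENOMEM;
}

int main(void)
{
	struct lump_tracking pile = { .num_entries = 8 };

	printf("vsi0 base=%d\n", get_lump(&pile, 4, 1, 0));  /* entries 0..3 */
	printf("fdir base=%d\n", get_lump(&pile, 1, 2, 1));  /* last entry, 7 */
	printf("vsi1 base=%d\n", get_lump(&pile, 4, 3, 0));  /* only 3 free -> -ENOMEM */
	return 0;
}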
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..1908eed4fa5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -413,6 +413,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b785d09c19f8..2606e8f0f19b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1377,6 +1377,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, we need to know that no other process
+ * has reserved the hardware for any reset operation. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync with any VFR reset in progress before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the VF driver to finish its reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset VF using VPGEN_VFRTRIG reg. This also sets the
+ * reset-in-progress state in the RSTAT1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -1877,19 +1917,17 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
- * i40e_vc_send_msg_to_vf_ex
+ * i40e_vc_send_msg_to_vf
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
- * @is_quiet: true for not printing unsuccessful return values, false otherwise
*
* send msg to VF
**/
-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen,
- bool is_quiet)
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@@ -1904,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
hw = &pf->hw;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* single place to detect unsuccessful return values */
- if (v_retval && !is_quiet) {
- vf->num_invalid_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
- if (vf->num_invalid_msgs >
- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
- "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_invalid_msgs = 0;
- }
-
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
@@ -1936,23 +1955,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
}
/**
- * i40e_vc_send_msg_to_vf
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
-{
- return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
- msg, msglen, false);
-}
-
-/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
* @opcode: operation code
@@ -2618,6 +2620,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - find a large enough block of queues
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues already satisfy the request */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, just check whether
+ * there are enough free entries behind the allocated ones to
+ * cover the additional request.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2651,6 +2706,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
@@ -2723,7 +2784,6 @@ error_param:
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
- * @is_quiet: set true for printing msg without opcode info, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@@ -2738,8 +2798,7 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
- struct virtchnl_ether_addr_list *al,
- bool *is_quiet)
+ struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2747,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
int mac2add_cnt = 0;
int i;
- *is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@@ -2771,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
- *is_quiet = true;
return -EPERM;
}
@@ -2822,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- bool is_quiet = false;
i40e_status ret = 0;
int i;
@@ -2839,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al, &is_quiet);
+ ret = i40e_check_vf_permission(vf, al);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2877,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret, NULL, 0, is_quiet);
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
}
/**
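
i40e_sync_vfr_reset(), added above, is a bounded poll: re-read a status register until the admin-queue bit appears, sleep between attempts, and give up with -EAGAIN after I40E_VFR_WAIT_COUNT tries. A standalone sketch of that pattern follows; read_status() is a hypothetical stand-in for the I40E_VFINT_ICR0_ENA read, and the sleep interval is only approximate.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_COUNT 100

/* pretend the "reset finished" bit appears on the 5th read */
static uint32_t read_status(int attempt)
{
	return attempt >= 5 ? 0x1 : 0x0;
}

static int wait_for_reset_done(void)
{
	for (int i = 0; i < WAIT_COUNT; i++) {
		if (read_status(i) & 0x1)
			return 0;
		usleep(150);  /* the driver uses usleep_range(100, 200) */
	}
	return -EAGAIN;   /* bounded: never spin forever */
}

int main(void)
{
	printf("wait_for_reset_done() = %d\n", wait_for_reset_done());
	return 0;
}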
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..a554d0a0b09b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -10,8 +10,6 @@
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-
#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
@@ -19,6 +17,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
@@ -91,9 +90,6 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
u64 num_mdd_events; /* num of mdd events detected */
- /* num of continuous malformed or invalid msgs detected */
- u64 num_invalid_msgs;
- u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 59806d1f7e79..4babe4705a55 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -201,6 +201,10 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
+enum iavf_critical_section_t {
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -246,7 +250,6 @@ struct iavf_adapter {
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
- struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -284,6 +287,8 @@ struct iavf_adapter {
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8125b9120615..0e178a0a59c5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -302,8 +302,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR01);
rd32(hw, IAVF_VFINT_ICR0_ENA1);
- /* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+ queue_work(iavf_wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -1136,8 +1137,7 @@ void iavf_down(struct iavf_adapter *adapter)
rss->state = IAVF_ADV_RSS_DEL_REQUEST;
spin_unlock_bh(&adapter->adv_rss_lock);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __IAVF_RESETTING) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
@@ -2120,7 +2120,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
"Requested %d queues, but PF only gave us %d.\n",
num_req_queues,
adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
iavf_schedule_reset(adapter);
@@ -2374,17 +2374,22 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock))
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
goto restart_watchdog;
+ }
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
- if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
- adapter->state != __IAVF_RESETTING) {
- iavf_change_state(adapter, __IAVF_RESETTING);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+ queue_work(iavf_wq, &adapter->reset_task);
+ return;
}
switch (adapter->state) {
@@ -2419,6 +2424,15 @@ static void iavf_watchdog_task(struct work_struct *work)
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Do not update the state and do not reschedule the
+ * watchdog task; iavf_remove handles this state, and
+ * rescheduling here could loop forever
+ */
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
@@ -2435,6 +2449,17 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Set state to __IAVF_INIT_FAILED and perform remove
+ * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+ * doesn't bring the state back to __IAVF_COMM_FAILED.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2507,7 +2532,8 @@ static void iavf_watchdog_task(struct work_struct *work)
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state >= __IAVF_DOWN)
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
@@ -2515,6 +2541,13 @@ restart_watchdog:
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
}
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set communication failed flag and free all resources.
+ * NOTE: This function is expected to be called with the crit_lock held.
+ **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
@@ -2569,7 +2602,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
- mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -2601,13 +2633,13 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (mutex_is_locked(&adapter->remove_lock))
- return;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(iavf_wq, &adapter->reset_task);
- if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
- schedule_work(&adapter->reset_task);
return;
}
+
while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
@@ -2662,6 +2694,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2670,8 +2703,7 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __IAVF_RUNNING) ||
- (adapter->state == __IAVF_RESETTING));
+ running = adapter->state == __IAVF_RUNNING;
if (running) {
netdev->flags &= ~IFF_UP;
@@ -2701,7 +2733,8 @@ continue_reset:
err);
adapter->aq_required = 0;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_reinit_interrupt_scheme(adapter);
if (err)
goto reset_err;
@@ -2773,12 +2806,13 @@ continue_reset:
if (err)
goto reset_err;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto reset_err;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
}
iavf_configure(adapter);
@@ -2793,6 +2827,9 @@ continue_reset:
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
+
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
@@ -2826,13 +2863,19 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ queue_work(iavf_wq, &adapter->adminq_task);
+ goto out;
+ }
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
- if (iavf_lock_timeout(&adapter->crit_lock, 200))
- goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2848,6 +2891,24 @@ static void iavf_adminq_task(struct work_struct *work)
} while (pending);
mutex_unlock(&adapter->crit_lock);
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+ if (adapter->netdev_registered ||
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features
+ (adapter, 0, netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+ }
+
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
@@ -3800,11 +3861,12 @@ static int iavf_close(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int status;
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return 0;
+ mutex_lock(&adapter->crit_lock);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ if (adapter->state <= __IAVF_DOWN_PENDING) {
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+ }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -3853,8 +3915,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
iavf_notify_client_l2_params(&adapter->vsi);
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ queue_work(iavf_wq, &adapter->reset_task);
+ }
return 0;
}
@@ -4431,7 +4496,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mutex_init(&adapter->crit_lock);
mutex_init(&adapter->client_lock);
- mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -4547,7 +4611,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- enum iavf_state_t prev_state = adapter->last_state;
struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
@@ -4556,14 +4619,37 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
int err;
- /* Indicate we are in remove and not to run reset_task */
- mutex_lock(&adapter->remove_lock);
- cancel_work_sync(&adapter->reset_task);
+
+ /* When a reboot/shutdown is in progress there is nothing to do,
+ * as the adapter is already in the __IAVF_REMOVE state, set during
+ * the iavf_shutdown() callback.
+ */
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+ /* Wait until port initialization is complete.
+ * There are flows where register/unregister netdev may race.
+ */
+ while (1) {
+ mutex_lock(&adapter->crit_lock);
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_DOWN ||
+ adapter->state == __IAVF_INIT_FAILED) {
+ mutex_unlock(&adapter->crit_lock);
+ break;
+ }
+
+ mutex_unlock(&adapter->crit_lock);
+ usleep_range(500, 1000);
+ }
cancel_delayed_work_sync(&adapter->watchdog_task);
- cancel_delayed_work_sync(&adapter->client_task);
+
if (adapter->netdev_registered) {
- unregister_netdev(netdev);
+ rtnl_lock();
+ unregister_netdevice(netdev);
adapter->netdev_registered = false;
+ rtnl_unlock();
}
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_del_device(adapter);
@@ -4572,6 +4658,10 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
+ mutex_lock(&adapter->crit_lock);
+ dev_info(&adapter->pdev->dev, "Remove device\n");
+ iavf_change_state(adapter, __IAVF_REMOVE);
+
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -4579,37 +4669,24 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- dev_info(&adapter->pdev->dev, "Removing device\n");
+ iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
- iavf_change_state(adapter, __IAVF_REMOVE);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->adminq_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
- iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
- /* In case we enter iavf_remove from erroneous state, free traffic irqs
- * here, so as to not cause a kernel crash, when calling
- * iavf_reset_interrupt_capability.
- */
- if ((adapter->last_state == __IAVF_RESETTING &&
- prev_state != __IAVF_DOWN) ||
- (adapter->last_state == __IAVF_RUNNING &&
- !(netdev->flags & IFF_UP)))
- iavf_free_traffic_irqs(adapter);
-
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- cancel_delayed_work_sync(&adapter->watchdog_task);
-
- cancel_work_sync(&adapter->adminq_task);
-
iavf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -4621,8 +4698,6 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
- mutex_unlock(&adapter->remove_lock);
- mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
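
Several iavf work items above (watchdog, adminq, reset) now use mutex_trylock() on crit_lock and either bail out when the device is being removed or requeue themselves instead of sleeping on the lock. A small pthread-based sketch of that "trylock or requeue" shape is below; the state enum and the printf standing in for queue_work() are simplified stand-ins, so this models the idea only, not the driver's locking.

#include <pthread.h>
#include <stdio.h>

enum state { RUNNING, REMOVING };

struct adapter {
	pthread_mutex_t crit_lock;
	enum state state;
};

static void requeue_task(const char *name)
{
	printf("requeue %s\n", name);   /* stand-in for queue_work(iavf_wq, ...) */
}

static void adminq_task(struct adapter *ad)
{
	if (pthread_mutex_trylock(&ad->crit_lock) != 0) {
		if (ad->state == REMOVING)
			return;               /* remove path owns the lock; don't fight it */
		requeue_task("adminq");       /* try again later instead of blocking */
		return;
	}

	/* ... process admin queue messages ... */
	pthread_mutex_unlock(&ad->crit_lock);
}

int main(void)
{
	struct adapter ad = { .state = RUNNING };

	pthread_mutex_init(&ad.crit_lock, NULL);
	adminq_task(&ad);                  /* lock free: runs normally */
	pthread_mutex_lock(&ad.crit_lock);
	adminq_task(&ad);                  /* lock busy: requeues itself */
	pthread_mutex_unlock(&ad.crit_lock);
	pthread_mutex_destroy(&ad.crit_lock);
	return 0;
}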
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5ee1d118fd30..5263cefe46f5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1835,6 +1835,22 @@ void iavf_request_reset(struct iavf_adapter *adapter)
}
/**
+ * iavf_netdev_features_vlan_strip_set - update vlan strip status
+ * @netdev: ptr to netdev being adjusted
+ * @enable: enable or disable vlan strip
+ *
+ * Helper function to change vlan strip status in netdev->features.
+ */
+static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ const bool enable)
+{
+ if (enable)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2057,8 +2073,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* VLAN stripping could not be enabled by ethtool.
+ * Disable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* VLAN stripping could not be disabled by ethtool.
+ * Enable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, true);
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
@@ -2146,29 +2172,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
sizeof(adapter->vlan_v2_caps)));
iavf_process_config(adapter);
-
- /* unlock crit_lock before acquiring rtnl_lock as other
- * processes holding rtnl_lock could be waiting for the same
- * crit_lock
- */
- mutex_unlock(&adapter->crit_lock);
- /* VLAN capabilities can change during VFR, so make sure to
- * update the netdev features with the new capabilities
- */
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- if (iavf_lock_timeout(&adapter->crit_lock, 10000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
- __FUNCTION__);
-
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
- iavf_set_queue_vlan_tag_loc(adapter);
-
+ adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -2334,6 +2338,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ /* PF enabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ /* PF disabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4e16d185077d..bea1d1e39fa2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -280,7 +280,6 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
- ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
@@ -483,6 +482,8 @@ enum ice_pf_flags {
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_MTU_CHANGED,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -887,7 +888,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
@@ -897,7 +898,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- ice_unplug_aux_dev(pf);
+ /* We can directly unplug the aux device here only if the
+ * ICE_FLAG_PLUG_AUX_DEV flag bit is not set, because ice_unplug_aux_dev()
+ * could race with ice_plug_aux_dev() called from
+ * ice_service_task(). In that case we only clear the bit now and the
+ * aux device will be unplugged later, once ice_plug_aux_dev()
+ * called from ice_service_task() finishes (see ice_service_task()).
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
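
The ice.h hunk above replaces the direct ice_plug_aux_dev() call with a deferred request flag that the service task consumes, while ice_clear_rdma_cap() cancels a still-pending plug instead of unplugging. A single-threaded sketch of that handshake follows; plain booleans stand in for the atomic PF flag bits, so it illustrates the ordering only and is not concurrency-safe.

#include <stdbool.h>
#include <stdio.h>

static bool plug_requested;   /* stand-in for ICE_FLAG_PLUG_AUX_DEV */
static bool aux_plugged;      /* stand-in for the aux device being present */

static void set_rdma_cap(void)
{
	plug_requested = true;    /* only request; the service task does the work */
}

static void clear_rdma_cap(void)
{
	/* if a plug is still only pending, cancel it; otherwise unplug now */
	if (plug_requested)
		plug_requested = false;
	else
		aux_plugged = false;
}

static void service_task(void)
{
	if (plug_requested) {
		aux_plugged = true;       /* ice_plug_aux_dev() */
		plug_requested = false;
		/* an unplug requested while plugging would be handled here */
	}
}

int main(void)
{
	set_rdma_cap();
	service_task();
	printf("plugged=%d\n", aux_plugged);   /* 1 */
	clear_rdma_cap();
	printf("plugged=%d\n", aux_plugged);   /* 0 */
	return 0;
}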
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 408d15a5b0e3..e2af99a763ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3340,9 +3340,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
- struct ice_link_default_override_tlv tlv;
+ struct ice_link_default_override_tlv tlv = { 0 };
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 864692b157b6..73edc24d81d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
+ rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index e2e3ef7fba7f..a5dc9824e255 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2298,7 +2298,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (err)
goto done;
- curr_link_speed = pi->phy.link_info.link_speed;
+ curr_link_speed = pi->phy.curr_user_speed_req;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
/* If speed didn't get set, set it to what it currently is.
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index e375ac849aec..4f954db01b92 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
@@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag,
}
/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+ /* check that this event is for this netdev and that
+ * we are currently part of an aggregate
+ */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d981dc6f2323..85a612838a89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c187cf04fcf..53256aca27c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30814435f779..b7e8744b0c0a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1799,7 +1799,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
+ mutex_lock(&pf->vf[i].cfg_lock);
ice_reset_vf(&pf->vf[i], false);
+ mutex_unlock(&pf->vf[i].cfg_lock);
}
}
}
@@ -2253,6 +2255,30 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+ /* Plug aux device per request */
+ ice_plug_aux_dev(pf);
+
+ /* Mark plugging as done but check whether unplug was
+ * requested during the ice_plug_aux_dev() call
+ * (e.g. from ice_clear_rdma_cap()) and if so then
+ * unplug the aux device.
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+ }
+
+ if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -3018,7 +3044,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
struct iidc_event *event;
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (event) {
set_bit(IIDC_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
@@ -4854,7 +4880,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
@@ -4862,6 +4887,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5936,8 +5962,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (ring)
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ if (!ring)
+ continue;
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -6817,7 +6844,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct iidc_event *event;
u8 count = 0;
int err = 0;
@@ -6852,14 +6878,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
@@ -6867,21 +6885,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- goto event_after;
+ return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- goto event_after;
+ return err;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
return err;
}
@@ -8525,6 +8540,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8553,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
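
The ice_features_check() changes above validate header lengths with the skb offset helpers and only do the inner-header math for GRE/UDP-tunnel GSO frames. The core length rule (within the hardware maximum and an even number of bytes) can be sketched standalone as below; the *_MAX values and the plain-integer inputs are placeholders, not the driver's ICE_TXD_* limits or skb helpers.

#include <stdbool.h>
#include <stdio.h>

/* placeholder limits; the driver uses ICE_TXD_MACLEN_MAX / ICE_TXD_IPLEN_MAX */
#define MACLEN_MAX 127
#define IPLEN_MAX  127

static bool headers_ok(unsigned int mac_len, unsigned int ip_len)
{
	if (mac_len > MACLEN_MAX || (mac_len & 0x1))
		return false;   /* MAC header too long or odd length */
	if (ip_len > IPLEN_MAX || (ip_len & 0x1))
		return false;   /* IP header too long or odd length */
	return true;
}

int main(void)
{
	printf("%d\n", headers_ok(14, 20));   /* typical Ethernet + IPv4: ok */
	printf("%d\n", headers_ok(15, 20));   /* odd MAC header length: rejected */
	return 0;
}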
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index dc1b0e9e6df5..695b6dd61dc2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -47,6 +47,7 @@ enum ice_protocol_type {
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ae291d442539..000c39d163a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1533,9 +1533,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
struct timespec64 now, then;
+ int ret;
then = ns_to_timespec64(delta);
- ice_ptp_gettimex64(info, &now, NULL);
+ ret = ice_ptp_gettimex64(info, &now, NULL);
+ if (ret)
+ return ret;
now = timespec64_add(now, then);
return ice_ptp_settime64(info, (const struct timespec64 *)&now);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 11ae0bee3590..475ec2afa210 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4537,6 +4537,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
break;
@@ -5305,7 +5306,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN) {
+ if (rinfo->tun_type != ICE_NON_TUN &&
+ rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
pkt_offsets);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e8aab664270a..65cf32eb4046 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
headers->l4_key.dst_port = match.key->dst;
headers->l4_mask.dst_port = match.mask->dst;
}
@@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
headers->l4_key.src_port = match.key->src;
headers->l4_mask.src_port = match.mask->src;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 39b80124d282..1be3cd4b2bef 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -500,8 +500,6 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;
- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
if (!pf->vf)
return;
@@ -519,22 +517,26 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- ice_dis_vf_qs(&pf->vf[i]);
-
tmp = pf->num_alloc_vfs;
pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(&pf->vf[i]);
- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
- ice_free_vf_res(&pf->vf[i]);
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- mutex_destroy(&pf->vf[i].cfg_lock);
+ mutex_unlock(&vf->cfg_lock);
+
+ mutex_destroy(&vf->cfg_lock);
}
if (ice_sriov_free_msix_res(pf))
@@ -570,7 +572,6 @@ void ice_free_vfs(struct ice_pf *pf)
i);
clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -1498,6 +1499,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
+ mutex_lock(&vf->cfg_lock);
+
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -1512,6 +1515,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
}
if (ice_is_eswitch_mode_switchdev(pf))
@@ -1562,6 +1567,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
+ lockdep_assert_held(&vf->cfg_lock);
+
dev = ice_pf_to_dev(pf);
if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
@@ -2061,9 +2068,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx))
+ if (reg & BIT(bit_idx)) {
/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ mutex_lock(&vf->cfg_lock);
ice_reset_vf(vf, true);
+ mutex_unlock(&vf->cfg_lock);
+ }
}
}
@@ -2140,7 +2150,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
if (!vf)
return;
+ mutex_lock(&vf->cfg_lock);
ice_vc_reset_vf(vf);
+ mutex_unlock(&vf->cfg_lock);
}
/**
@@ -2170,24 +2182,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
dev = ice_pf_to_dev(pf);
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_inval_msgs++;
- dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
- v_opcode, v_retval);
- if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(dev, "Use PF Control I/F to enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- return -EIO;
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_inval_msgs = 0;
- }
-
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
@@ -4625,10 +4619,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;
- /* if de-init is underway, don't process messages from VF */
- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
- return;
-
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 752487a1bdd6..8f27255cc0cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -14,7 +14,6 @@
#define ICE_MAX_MACADDR_PER_VF 18
/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
#define ICE_MDD_EVENTS_THRESHOLD 30
/* Static VF transaction/status register def */
@@ -134,8 +133,6 @@ struct ice_vf {
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- u64 num_inval_msgs; /* number of continuous invalid msgs */
- u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
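
Note on the ice hunks above: the global ICE_VF_DEINIT_IN_PROGRESS flag and the invalid-message counters go away because every VF reset/config path now serializes on the per-VF cfg_lock, and ice_reset_vf() asserts that lock with lockdep. A minimal sketch of that locking pattern, with made-up structure and function names rather than the driver's own:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct vf_ctx {
        struct mutex cfg_lock;          /* serializes reset/config of this VF */
        bool disabled;
};

/* Callee documents and checks the caller's locking responsibility. */
static void vf_do_reset(struct vf_ctx *vf)
{
        lockdep_assert_held(&vf->cfg_lock);
        vf->disabled = true;
        /* ... stop queues, clear mappings, rebuild VSI ... */
}

static void vf_handle_flr(struct vf_ctx *vf)
{
        mutex_lock(&vf->cfg_lock);
        vf_do_reset(vf);
        mutex_unlock(&vf->cfg_lock);
}

Taking the mutex in every event handler (FLR, mailbox, overflow) is what lets the de-init flag and the message counters be removed.
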
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..40dbf4b43234 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
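
Both igc hunks remove an early return taken when the MDIC access fails; returning there skipped hw->phy.ops.release() and leaked the acquired PHY semaphore. The general shape is to record the status and still fall through to the release, roughly as below (types and helper names are placeholders, not igc's):

#include <linux/types.h>

struct phy_hw;          /* opaque, stands in for the driver's hw struct */

int phy_acquire(struct phy_hw *hw);
void phy_release(struct phy_hw *hw);
int phy_write_mdic(struct phy_hw *hw, u32 offset, u16 data);

static int phy_write_locked(struct phy_hw *hw, u32 offset, u16 data)
{
        int ret = phy_acquire(hw);

        if (ret)
                return ret;

        /* Keep the status but do not return yet: the semaphore must be
         * released on the error path as well as on success. */
        ret = phy_write_mdic(hw, offset, data);

        phy_release(hw);
        return ret;
}
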
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b3fd8e5cd85b..6a5e9cf6b5da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -390,12 +390,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
u32 cmd_type;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
- !netif_carrier_ok(xdp_ring->netdev)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
}
+ if (!netif_carrier_ok(xdp_ring->netdev))
+ break;
+
if (!xsk_tx_peek_desc(pool, &desc))
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0015fcf1df2b..0f293acd17e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41d11137cde0..5712c3e94be8 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
if (ctl & LTQ_DMA_EOP) {
ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
- netif_receive_skb(ch->skb_head);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += ch->skb_head->len;
+ netif_receive_skb(ch->skb_head);
ch->skb_head = NULL;
ch->skb_tail = NULL;
ret = XRX200_DMA_PACKET_COMPLETE;
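
The xrx200 reorder matters because netif_receive_skb() passes ownership of the skb to the stack, which may free it immediately; the byte count therefore has to be read from the skb before the hand-off. In generic form:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void deliver_rx(struct net_device *dev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev);

        /* Account first: after netif_receive_skb() the skb may already
         * be freed, so skb->len must not be touched again. */
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;

        netif_receive_skb(skb);
}
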
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index f99adbf26ab4..04345b929d8e 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 105247582684..143ca8be5eb5 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
static struct platform_device *port_platdev[3];
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
@@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- of_get_mac_address(pnp, ppd.mac_addr);
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret)
+ return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
return ret;
}
}
return 0;
}
-static void mv643xx_eth_shared_of_remove(void)
-{
- int n;
-
- for (n = 0; n < 3; n++) {
- platform_device_del(port_platdev[n]);
- port_platdev[n] = NULL;
- }
-}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
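
The mv643xx_eth fix moves the remove helper ahead of the probe path and calls it when adding a later port fails, so ports registered earlier in the loop are not leaked; it also stops ignoring the of_get_mac_address() return value. The unwind-on-error shape, with a hypothetical add_one_port() helper:

#include <linux/platform_device.h>

#define NUM_PORTS 3
static struct platform_device *ports[NUM_PORTS];

int add_one_port(int n);        /* hypothetical: allocates and registers ports[n] */

static void ports_remove(void)
{
        int n;

        for (n = 0; n < NUM_PORTS; n++) {
                platform_device_del(ports[n]);  /* tolerates NULL entries */
                ports[n] = NULL;
        }
}

static int ports_probe(void)
{
        int n, ret;

        for (n = 0; n < NUM_PORTS; n++) {
                ret = add_one_port(n);
                if (ret) {
                        ports_remove();         /* undo the ports added so far */
                        return ret;
                }
        }
        return 0;
}
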
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7cdbf8b8bbf6..1a835b48791b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
- port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
-
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..3631d612aaca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1570,6 +1570,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..b33e7d1d0851 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -107,6 +107,9 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..58e2aeebc14f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -732,6 +732,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..4180376fa676 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..9ea2f6ac38ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -30,6 +30,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -54,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -252,23 +291,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
if (!rpm || lmac_id >= rpm->lmac_count)
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..ff580311edd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -43,6 +43,8 @@
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -57,4 +59,6 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..54e1b27a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -520,8 +520,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..5ed94cfb47d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -806,6 +806,7 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..8a7ac5a8b821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -441,16 +441,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
- return 0;
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
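
With rvu_cgx_config_rxtx() and the new rvu_cgx_config_tx() going through get_mac_ops(), the AF code no longer calls CGX-specific helpers directly, and the same call sites work for RPM once rpm_mac_ops gains .mac_rx_tx_enable and .mac_tx_enable in the rpm.c hunk above. The dispatch pattern in isolation, with illustrative names:

#include <linux/types.h>

struct mac_ops {
        int (*mac_rx_tx_enable)(void *macd, int lmac_id, bool enable);
        int (*mac_tx_enable)(void *macd, int lmac_id, bool enable);
};

struct mac_dev {
        const struct mac_ops *ops;
        /* block-specific state follows */
};

/* Common layer: unaware of whether the MAC block is CGX or RPM. */
static int mac_config_rxtx(struct mac_dev *mac, int lmac_id, bool start)
{
        return mac->ops->mac_rx_tx_enable(mac, lmac_id, start);
}
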
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a73a8017e0ee..a79201a9a6f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -605,6 +605,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
+ case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..d1eddb769a41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1224,6 +1224,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..97fb61915379 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -512,11 +512,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3891,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3918,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..91f86d77cd41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -402,6 +402,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +424,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fix up not needed for the rules added by user(ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +496,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..19c53e591d0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1098,14 +1098,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1132,6 +1124,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..14509fc64cce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -603,6 +603,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
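
The new dma_wmb() adds an ordering point ahead of the LMT copy and flush so that earlier stores are visible to the device by the time the LMTST is issued. The canonical use of dma_wmb() is the same idea in descriptor form: publish the payload fields before the flag that lets the device consume them (struct and flag below are illustrative, not this driver's):

#include <linux/types.h>
#include <asm/barrier.h>

#define DESC_OWN (1u << 31)     /* hypothetical "device owns this descriptor" flag */

struct tx_desc {
        u32 addr;
        u32 len;
        u32 status;
};

static void post_descriptor(struct tx_desc *d, u32 addr, u32 len)
{
        d->addr = addr;
        d->len  = len;

        /* Ensure the fields above are visible to the device before the
         * ownership bit that allows it to start reading them. */
        dma_wmb();

        d->status = DESC_OWN;
}
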
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6080ebd9bd94..d39341e4ab37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -394,7 +394,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* Error code -EIO indicate there is a communication failure
+ * to the AF. Rest of the error codes indicate that AF processed
+ * VF messages and set the error codes in response messages
+ * (if any) so simply forward responses to VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index cad93f747d0c..73cd0a4b7291 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -554,6 +554,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
dev_info(prestera_dev(sw), "using random base mac address\n");
}
of_node_put(base_mac_np);
+ of_node_put(np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
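
The prestera one-liner balances the reference taken when np was looked up: of_node_put() has to run on every exit path, not only for the child node. In general (the path below is made up):

#include <linux/of.h>

static int read_board_config(void)
{
        struct device_node *np;

        np = of_find_node_by_path("/board-config");     /* takes a reference */
        if (!np)
                return -ENODEV;

        /* ... read properties from np ... */

        of_node_put(np);        /* drop the reference on every return path */
        return 0;
}
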
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 17fe05809653..3eacd8739929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -131,11 +131,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->alloc_lock, flags);
+ lockdep_assert_held(&cmd->alloc_lock);
set_bit(idx, &cmd->bitmask);
- spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +142,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
+ struct mlx5_cmd *cmd = ent->cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
if (!refcount_dec_and_test(&ent->refcnt))
- return;
+ goto out;
if (ent->idx >= 0) {
- struct mlx5_cmd *cmd = ent->cmd;
-
cmd_free_index(cmd, ent->idx);
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
}
cmd_free_ent(ent);
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 812e6810cb3b..c14e06ca64d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@@ -241,8 +241,8 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
- struct mlx5_mtt inline_mtts[0];
- struct mlx5_klm inline_klms[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+ DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
};
};
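
The en.h hunk finishes the move away from zero-length arrays: mlx5e_tx_wqe gains a proper C99 flexible-array member, and because a bare flexible array is not permitted inside a union, the two overlapping UMR layouts use DECLARE_FLEX_ARRAY() from <linux/stddef.h>. A stripped-down example of the same construct (struct names made up):

#include <linux/stddef.h>
#include <linux/types.h>

struct msg_hdr {
        u32 opcode;
};

struct msg {
        struct msg_hdr hdr;
        union {
                DECLARE_FLEX_ARRAY(u32, words); /* two views of the same trailing storage */
                DECLARE_FLEX_ARRAY(u8, bytes);
        };
};
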
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 00449df98a5e..c1e07496c89c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
- *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+ /* Hardware treats 0 as "unlimited", set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
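
The qos.c change exists because the device interprets a max_average_bw of 0 as "unlimited", so a ceiling small enough to divide down to zero must be rounded up to 1. The conversion in isolation (the divisor shown is the usual bytes-per-Mbit figure and is only illustrative):

#include <linux/types.h>
#include <linux/math64.h>
#include <linux/minmax.h>

#define BYTES_IN_MBIT 125000    /* illustrative: 1 Mbit expressed in bytes */

static u32 ceil_to_max_average_bw(u64 ceil)
{
        /* 0 would mean "no limit" to the hardware, so clamp to >= 1 */
        return max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
}
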
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 9c076aa20306..b6f5c1bcdbcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *priv;
-
- /* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
- return false;
-
- priv = netdev_priv(netdev);
- rpriv = priv->ppriv;
-
- /* Egress acl forward to vport is supported only non-uplink representor */
- return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}
static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
u16 fwd_vport_num;
int err;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_bond *bond;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return NOTIFY_DONE;
+
+ bond = container_of(nb, struct mlx5e_rep_bond, nb);
+ priv = netdev_priv(netdev);
+ rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+ /* Verify VF representor is on the same device of the bond handling the netevent. */
+ if (rpriv->uplink_priv.bond != bond)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGELOWERSTATE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index c6d2f8c78db7..48dc121b2cb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
- err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@@ -509,7 +509,9 @@ err_register_swdev_blk:
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
+ rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
- unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 26efa33de56f..9cc844bd00f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 06ec30cdb269..58cc33f1363d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
+ /* It's redundant to do ct clear more than once. */
+ if (clear_action && parse_state->ct_clear)
+ return 0;
+
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
@@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->ct_clear = clear_action;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index c614fc7fdc9c..2e615e0ba972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
return -ENOMEM;
parse_state->encap = false;
+
+ if (parse_state->mpls_push) {
+ memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+ &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+ parse_state->mpls_push = false;
+ }
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 784fc4f68b1e..89ca88c78840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
return true;
}
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+ const struct flow_action_entry *act)
+{
+ mpls_info->label = act->mpls_push.label;
+ mpls_info->tc = act->mpls_push.tc;
+ mpls_info->bos = act->mpls_push.bos;
+ mpls_info->ttl = act->mpls_push.ttl;
+}
+
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
+ copy_mpls_info(&parse_state->mpls_info, act);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index f832c26ff2c3..70b40ae384e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -35,6 +35,7 @@ enum {
struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9918ed8c059b..d39d0dae22fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
+ const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
struct mlx5e_encap_entry *e;
struct mlx5e_encap_key key;
@@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
+ mpls_info = &parse_attr->mpls_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err_init;
}
e->tun_info = tun_info;
+ memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err_init;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 60952b33b568..c5b1617d556f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
struct mlx5e_encap_entry *r)
{
const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
struct udphdr *udp = (struct udphdr *)(buf);
struct mpls_shim_hdr *mpls;
- u32 tun_id;
- tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
mpls = (struct mpls_shim_hdr *)(udp + 1);
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
- *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+ *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
return 0;
}
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
void *headers_v)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct flow_match_enc_keyid enc_keyid;
struct flow_match_mpls match;
void *misc2_c;
void *misc2_v;
- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters_2);
- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters_2);
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
- return 0;
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
- return 0;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- if (!enc_keyid.mask->keyid)
- return 0;
-
if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return -EOPNOTSUPP;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
flow_rule_match_mpls(rule, &match);
/* Only support matching the first LSE */
if (match.mask->used_lses != 1)
return -EOPNOTSUPP;
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_label,
match.mask->ls[0].mpls_label);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index da169b816665..d4239e3b3c88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
break;
- case MLX5E_PACKET_MERGE_SHAMPO:
- MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 4cdf8e5b24c2..b789af07829c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+}
+
struct mlx5e_shampo_umr {
u16 len;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 338d65e2c9ce..56e10c84a706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
+ MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
dseg++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 2db9573a3fe6..b56fea142c24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | IP | [TCP | UDP] */
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index b98db50c3418..428881e0adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -131,14 +131,17 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- struct xfrm_offload *xo = xfrm_offload(skb);
+ u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
return false;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (xo->inner_ipproto) {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ inner_ipproto = xfrm_offload(skb)->inner_ipproto;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 57d755db1cf5..6e80585d731f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
- return 0;
+ return size_read;
}
i += size_read;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bf80fb612449..3667f5ef5990 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b01dacb6f527..b3f7520dfd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
struct rcu_head rcu;
};
+struct mlx5e_mpls_info {
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+};
+
struct mlx5e_encap_entry {
/* attached neigh hash entry */
struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e86ccc22fb82..6530d7bd5045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
@@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
}
/* True when explicitly set via priv flag, or XDP prog is loaded */
- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+ get_cqe_tls_offload(cqe))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -1871,7 +1872,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
-static void
+static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
@@ -1895,7 +1896,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
if (unlikely(!skb))
- return;
+ return NULL;
/* queue up for recycling/reuse */
page_ref_inc(head->page);
@@ -1907,7 +1908,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
ALIGN(head_size, sizeof(long)));
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
- return;
+ return NULL;
}
prefetchw(skb->data);
@@ -1918,9 +1919,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb->tail += head_size;
skb->len += head_size;
}
- rq->hw_gro_data->skb = skb;
- NAPI_GRO_CB(skb)->count = 1;
- skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+ return skb;
}
static void
@@ -1973,13 +1972,14 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u16 head_size = cqe->shampo.header_size;
struct sk_buff **skb = &rq->hw_gro_data->skb;
bool flush = cqe->shampo.flush;
bool match = cqe->shampo.match;
@@ -2011,9 +2011,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (likely(head_size))
+ *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ else
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+ page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
+
+ NAPI_GRO_CB(*skb)->count = 1;
+ skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2034,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
}
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ if (likely(head_size)) {
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ }
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
if (flush)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 8c9163d2c646..08a75654f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
buf[count] = st.st_func(priv);
netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 26e326fe503c..00f1d16db456 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
-
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
struct ethtool_fec_stats *fec_stats)
{
+ if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ return;
+
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, fec_stats);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d908a7e1406..b27532a9301e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_out;
- if (!attr->chain && esw_attr->int_port) {
+ if (!attr->chain && esw_attr->int_port &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* If decap route device is internal port, change the
* source vport value in reg_c0 back to uplink just in
* case the rule performs goto chain > 0. If we have a miss
@@ -3191,6 +3192,30 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
}
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 7fd33b356cc8..ee7ecb88adc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
- memcpy(vhdr, skb->data, cpy1_sz);
+ memcpy(&vhdr->addrs, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f690f430f40f..05e08cec5a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads;
+ ASSERT_RTNL();
+
br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
@@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+ ASSERT_RTNL();
+
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 3401188e0a60..51ac24e6ec3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
__field(unsigned int, used)
),
TP_fast_assign(
- strncpy(__entry->dev_name,
+ strscpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 11bbcd5f5b8b..694c54066955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
}
int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 min_rate, u32 max_rate)
+ u32 max_rate, u32 min_rate)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9a7b25692505..cfcd72bad9af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- mlx5_ecpf_vport_exists(esw->dev))
- return false;
-
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b628917e38e4..537c82b9aa53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else {
+ up_write_ref_node(&fte->node, false);
}
kfree(handle);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 0b0234f9d694..84dbe46d5ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer(&fw_reset->timer);
+ del_timer_sync(&fw_reset->timer);
}
static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 1ca01a5b6cdd..626aa60b6099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
}
+ /* Handle multipath entry with lower priority value */
+ if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+ return;
+
/* Handle add/replace event */
nhs = fib_info_num_path(fi);
if (nhs == 1) {
@@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
if (i < 0)
- i = MLX5_LAG_NORMAL_AFFINITY;
- else
- ++i;
+ return;
+ i++;
mlx5_lag_set_port_affinity(ldev, i);
}
+
+ mp->mfi = fi;
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index d5e47630e284..df58cba37930 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
+ if (!chains->dev->priv.eswitch ||
+ chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+ return 1;
+
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
@@ -211,7 +212,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2c774f367199..bba72b220cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1ef2b6a848c1..7b16a1188aab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+ size = MLX5_EEPROM_PAGE_LENGTH - offset;
query.size = size;
+ query.offset = offset;
return mlx5_query_mcia(dev, &query, data);
}
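The reworked clamp in mlx5_query_module_eeprom() is plain arithmetic: if the requested window would cross the 256-byte EEPROM page boundary, the size is trimmed so the read stops exactly at the boundary. A standalone sketch of that calculation (the constant name and helpers are illustrative, not the driver's own):

#include <assert.h>
#include <stddef.h>

#define EEPROM_PAGE_LENGTH 256	/* same boundary the driver uses */

/* Clamp an (offset, size) window so it never crosses the page boundary. */
static size_t clamp_to_page(size_t offset, size_t size)
{
	if (offset + size > EEPROM_PAGE_LENGTH)
		size = EEPROM_PAGE_LENGTH - offset;
	return size;
}

int main(void)
{
	assert(clamp_to_page(0, 128) == 128);	/* fits entirely in the page */
	assert(clamp_to_page(200, 100) == 56);	/* trimmed to stop at offset 256 */
	return 0;
}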
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 7f6fd9c5e371..e289cfdbce07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,7 +4,6 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
kvfree(icm_mr);
}
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
- if (!chunk->ste_arr)
- return -ENOMEM;
-
- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
- DR_STE_SIZE_REDUCED, GFP_KERNEL);
- if (!chunk->hw_ste_arr)
- goto out_free_ste_arr;
-
- chunk->miss_list = kvmalloc(chunk->num_of_entries *
- sizeof(chunk->miss_list[0]), GFP_KERNEL);
- if (!chunk->miss_list)
- goto out_free_hw_ste_arr;
+ /* We support only one type of STE size, both for ConnectX-5 and later
+ * devices. Once the support for match STE which has a larger tag is
+ * added (32B instead of 16B), the STE size for devices later than
+ * ConnectX-5 needs to account for that.
+ */
+ return DR_STE_SIZE_REDUCED;
+}
- return 0;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int index = offset / DR_STE_SIZE;
-out_free_hw_ste_arr:
- kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
- kvfree(chunk->ste_arr);
- return -ENOMEM;
+ chunk->ste_arr = &buddy->ste_arr[index];
+ chunk->miss_list = &buddy->miss_list[index];
+ chunk->hw_ste_arr = buddy->hw_ste_arr +
+ index * dr_icm_buddy_get_ste_size(buddy);
}
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
- kvfree(chunk->miss_list);
- kvfree(chunk->hw_ste_arr);
- kvfree(chunk->ste_arr);
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+ memset(chunk->hw_ste_arr, 0,
+ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->ste_arr, 0,
+ chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
kvfree(chunk);
}
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+ buddy->ste_arr = kvcalloc(num_of_entries,
+ sizeof(struct mlx5dr_ste), GFP_KERNEL);
+ if (!buddy->ste_arr)
+ return -ENOMEM;
+
+ /* Preallocate full STE size on non-ConnectX-5 devices since
+ * we need to support both full and reduced with the same cache.
+ */
+ buddy->hw_ste_arr = kvcalloc(num_of_entries,
+ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+ if (!buddy->hw_ste_arr)
+ goto free_ste_arr;
+
+ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+ if (!buddy->miss_list)
+ goto free_hw_ste_arr;
+
+ return 0;
+
+free_hw_ste_arr:
+ kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+ kvfree(buddy->ste_arr);
+ return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ kvfree(buddy->ste_arr);
+ kvfree(buddy->hw_ste_arr);
+ kvfree(buddy->miss_list);
+}
+
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
buddy->icm_mr = icm_mr;
buddy->pool = pool;
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ /* Reduce allocations by preallocating and reusing the STE structures */
+ if (dr_icm_buddy_init_ste_cache(buddy))
+ goto err_cleanup_buddy;
+ }
+
/* add it to the -start- of the list in order to search in it first */
list_add(&buddy->list_node, &pool->buddy_mem_list);
return 0;
+err_cleanup_buddy:
+ mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
kvfree(buddy);
free_mr:
@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
mlx5dr_buddy_cleanup(buddy);
+ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_cleanup_ste_cache(buddy);
+
kvfree(buddy);
}
@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
chunk->byte_size =
mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
- mlx5dr_err(pool->dmn,
- "Failed to init ste arrays (order: %d)\n",
- chunk_size);
- goto out_free_chunk;
- }
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_init(chunk, offset);
buddy_mem_pool->used_memory += chunk->byte_size;
- chunk->buddy_mem = buddy_mem_pool;
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
return chunk;
-
-out_free_chunk:
- kvfree(chunk);
- return NULL;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
- return true;
+ int allow_hot_size;
+
+ /* sync when hot memory reaches half of the pool size */
+ allow_hot_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) / 2;
- return false;
+ return pool->hot_memory_size > allow_hot_size;
}
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
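The hard-coded 64 MB threshold gives way to one derived from the pool itself: a sync is triggered once the hot (freed but not yet flushed) memory exceeds half of the pool's maximum chunk size in bytes. A rough standalone sketch of that comparison; the byte conversion below is a stand-in for mlx5dr_icm_pool_chunk_size_to_byte(), not the real helper:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the chunk-size-to-bytes helper: entries (2^log) times entry size. */
static size_t chunk_bytes(unsigned int max_log_chunk_sz, size_t entry_size)
{
	return ((size_t)1 << max_log_chunk_sz) * entry_size;
}

static bool sync_required(size_t hot_memory_size,
			  unsigned int max_log_chunk_sz, size_t entry_size)
{
	size_t allow_hot_size = chunk_bytes(max_log_chunk_sz, entry_size) / 2;

	return hot_memory_size > allow_hot_size;
}

int main(void)
{
	/* With 2^20 entries of 64 bytes each, the threshold is 32 MiB. */
	assert(!sync_required(16u << 20, 20, 64));
	assert(sync_required(40u << 20, 20, 64));
	return 0;
}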
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index e87cf498c77b..38971fe1dfe1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
return (spec->dmac_47_16 || spec->dmac_15_0);
}
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
- spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
- spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
{
return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.outer))
+ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.outer))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.inner))
+ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.inner))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 7e61742e58a0..187e29b409b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
used_hw_action_num);
}
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+ struct mlx5dr_match_spec *spec)
+{
+ if (spec->ip_version) {
+ if (spec->ip_version != 0xf) {
+ mlx5dr_err(dmn,
+ "Partial ip_version mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+ } else if (spec->ethertype != 0xffff &&
+ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+ mlx5dr_err(dmn,
+ "Partial/no ethertype mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value)
{
- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (value)
+ return 0;
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_err(dmn,
"Partial mask source_port is not supported\n");
@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
}
}
+ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->outer))
+ return -EINVAL;
+
+ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->inner))
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1b3d484b99be..55fcb751e24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -798,6 +798,16 @@ struct mlx5dr_match_param {
(_misc3)->icmpv4_code || \
(_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+ (_spec)->src_ip_95_64 || \
+ (_spec)->src_ip_63_32 || \
+ (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+ (_spec)->dst_ip_95_64 || \
+ (_spec)->dst_ip_63_32 || \
+ (_spec)->dst_ip_31_0)
+
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
u64 drop_icm_address_tx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index a476da2424f8..3f311462bedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
- if (term_actions->reformat)
+ if (term_actions->reformat) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->reformat;
+ }
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
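The larger action array simply budgets for the worst case the new comment spells out: 32 destinations, plus one counter action, plus one extra action for the multi-destination flow table, hence 34. A tiny sketch of that accounting with illustrative names (the driver defines MLX5_FLOW_CONTEXT_ACTION_MAX directly):

#include <assert.h>

#define MAX_RULE_DESTINATIONS	32
#define EXTRA_COUNTER_ACTION	 1
#define EXTRA_MULTI_DEST_ACTION	 1

#define ACTION_ARRAY_MAX \
	(MAX_RULE_DESTINATIONS + EXTRA_COUNTER_ACTION + EXTRA_MULTI_DEST_ACTION)

int main(void)
{
	assert(ACTION_ARRAY_MAX == 34);
	return 0;
}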
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c7c93131b762..dfa223415fe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
* sync_ste command sets them free.
*/
struct list_head hot_list;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ struct list_head *miss_list;
+ u8 *hw_ste_arr;
};
int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 0303e727e99f..d167d93e4c12 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -452,11 +452,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
return ks8851_probe_common(netdev, dev, msg_enable);
}
-static int ks8851_remove_spi(struct spi_device *spi)
+static void ks8851_remove_spi(struct spi_device *spi)
{
ks8851_remove_common(&spi->dev);
-
- return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 634ac7649c43..db5a3edb4c3c 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1612,15 +1612,13 @@ error_alloc:
return ret;
}
-static int enc28j60_remove(struct spi_device *spi)
+static void enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
-
- return 0;
}
static const struct of_device_id enc28j60_dt_ids[] = {
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b90efc80fb59..dc1840cb5b10 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1093,7 +1093,7 @@ error_out:
return ret;
}
-static int encx24j600_spi_remove(struct spi_device *spi)
+static void encx24j600_spi_remove(struct spi_device *spi)
{
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1101,8 +1101,6 @@ static int encx24j600_spi_remove(struct spi_device *spi)
kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
-
- return 0;
}
static const struct spi_device_id encx24j600_spi_id_table[] = {
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ca5f1177963d..ce5970bdcc6a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -40,11 +40,12 @@ static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
{
u32 val;
- return readx_poll_timeout(lan966x_mac_get_status,
- lan966x, val,
- (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
- MACACCESS_CMD_IDLE,
- TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
}
static void lan966x_mac_select(struct lan966x *lan966x,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2cb70da63db3..1f60fd125a1d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -182,9 +182,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
- return readx_poll_timeout(lan966x_port_inj_status, lan966x, val,
- QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
- READL_SLEEP_US, READL_TIMEOUT_US);
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
}
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 59783fc46a7b..10b866e9f726 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
- for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index a1acc9b461f2..d40e18ce3293 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -16,6 +16,8 @@
#include <linux/phylink.h>
#include <linux/hrtimer.h>
+#include "sparx5_main_regs.h"
+
/* Target chip type */
enum spx5_target_chiptype {
SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index dc7e5ea6ec15..148d431fcde4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
+ netif_rx(skb);
}
static int sparx5_inject(struct sparx5 *sparx5,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 4ce490a25f33..8e56ffa1c4f7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
struct sparx5 *sparx5 = port->sparx5;
int ret;
- /* Make the port a member of the VLAN */
- set_bit(port->portno, sparx5->vlan_mask[vid]);
- ret = sparx5_vlant_set_mask(sparx5, vid);
- if (ret)
- return ret;
-
- /* Default ingress vlan classification */
- if (pvid)
- port->pvid = vid;
-
/* Untagged egress vlan classification */
if (untagged && port->vid != vid) {
if (port->vid) {
@@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
port->vid = vid;
}
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
sparx5_vlan_port_apply(sparx5, port);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 455293aa6343..fd3ceb74620d 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
@@ -1432,6 +1436,8 @@ static void
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1440,6 +1446,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1448,6 +1456,8 @@ static void
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1456,6 +1466,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1737,12 +1749,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
}
EXPORT_SYMBOL(ocelot_get_strings);
+/* Caller must hold &ocelot->stats_lock */
static void ocelot_update_stats(struct ocelot *ocelot)
{
int i, j;
- mutex_lock(&ocelot->stats_lock);
-
for (i = 0; i < ocelot->num_phys_ports; i++) {
/* Configure the port to read the stats from */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
@@ -1761,8 +1772,6 @@ static void ocelot_update_stats(struct ocelot *ocelot)
~(u64)U32_MAX) + val;
}
}
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_check_stats_work(struct work_struct *work)
@@ -1771,7 +1780,9 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+ mutex_lock(&ocelot->stats_lock);
ocelot_update_stats(ocelot);
+ mutex_unlock(&ocelot->stats_lock);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
@@ -1781,12 +1792,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i;
+ mutex_lock(&ocelot->stats_lock);
+
/* check and update now */
ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+ mutex_unlock(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
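The stats rework is the usual "caller holds the lock" refactor: ocelot_update_stats() no longer takes stats_lock itself, so ocelot_get_ethtool_stats() can hold it across both the hardware refresh and the copy of the counters, keeping the two consistent. A generic userspace sketch of the same pattern, with a pthread mutex standing in for &ocelot->stats_lock:

#include <pthread.h>
#include <string.h>

#define NUM_STATS 64

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long stats[NUM_STATS];

/* Caller must hold stats_lock. */
static void update_stats(void)
{
	/* ... read hardware counters into stats[] ... */
}

static void get_ethtool_stats(unsigned long long *data)
{
	pthread_mutex_lock(&stats_lock);
	update_stats();				/* refresh ... */
	memcpy(data, stats, sizeof(stats));	/* ... and copy under the same lock */
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	unsigned long long snapshot[NUM_STATS];

	get_ethtool_stats(snapshot);
	return 0;
}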
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 949858891973..fdb4d7e7296c 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -60,6 +60,12 @@ static int ocelot_chain_to_block(int chain, bool ingress)
*/
static int ocelot_chain_to_lookup(int chain)
{
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
return (chain / VCAP_LOOKUP) % 10;
}
@@ -68,7 +74,15 @@ static int ocelot_chain_to_lookup(int chain)
*/
static int ocelot_chain_to_pag(int chain)
{
- int lookup = ocelot_chain_to_lookup(chain);
+ int lookup;
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
+ lookup = ocelot_chain_to_lookup(chain);
/* calculate PAG value as chain index relative to the first PAG */
return chain - VCAP_IS2_CHAIN(lookup, 0);
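ocelot_chain_to_lookup() maps a tc chain index to a VCAP lookup by integer division and modulo, with chain 0 now short-circuited to lookup 0 for the older single-chain offload interface. A small sketch of that mapping with an assumed VCAP_LOOKUP spacing (the real constant lives in the ocelot headers):

#include <assert.h>

/* Assumed chain spacing per lookup; illustrative, not the driver's constant. */
#define VCAP_LOOKUP 1000000

static int chain_to_lookup(int chain)
{
	if (chain == 0)		/* legacy single-chain offload */
		return 0;
	return (chain / VCAP_LOOKUP) % 10;
}

int main(void)
{
	assert(chain_to_lookup(0) == 0);
	assert(chain_to_lookup(2 * VCAP_LOOKUP + 5) == 2);
	return 0;
}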
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 784292b16290..1543e47456d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index dfb4468fe287..cb43651ea9ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- int ida_idx = NFP_MAX_MAC_INDEX, err;
struct nfp_tun_offloaded_mac *entry;
+ int ida_idx = -1, err;
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
@@ -997,7 +997,7 @@ err_remove_hash:
err_free_entry:
kfree(entry);
err_free_ida:
- if (ida_idx != NFP_MAX_MAC_INDEX)
+ if (ida_idx != -1)
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
return err;
@@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
+ u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
- u16 nfp_mac_idx;
-
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
@@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
- u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
@@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ else
+ nfp_mac_idx = entry->index;
+
/* If MAC has global ID then extract and free the ida entry. */
- if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index bc39558fe82b..756f97dce85b 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct netdata_local *pldat;
+ int ret;
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(ndev->irq);
@@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
pldat = netdev_priv(ndev);
/* Enable interface clock */
- clk_enable(pldat->clk);
+ ret = clk_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Reset and initialize */
__lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 8ac38828ba45..48cf4355bc47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3806,11 +3806,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
return found;
}
-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
- u16 vfid,
- struct qed_mcp_link_params *p_params,
- struct qed_mcp_link_state *p_link,
- struct qed_mcp_link_capabilities *p_caps)
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
@@ -3818,7 +3818,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
- return;
+ return -EINVAL;
p_bulletin = p_vf->bulletin.p_virt;
@@ -3828,6 +3828,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
if (p_caps)
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ return 0;
}
static int
@@ -4686,6 +4687,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
struct qed_public_vf_info *vf_info;
struct qed_mcp_link_state link;
u32 tx_rate;
+ int ret;
/* Sanitize request */
if (IS_VF(cdev))
@@ -4699,7 +4701,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
- qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ if (ret)
+ return ret;
/* Fill information about VF */
ivi->vf = vf_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 597cd9cd57b5..7b0e390c0b07 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
p_iov->bulletin.size,
&p_iov->bulletin.phys,
GFP_KERNEL);
+ if (!p_iov->bulletin.p_virt)
+ goto free_pf2vf_reply;
+
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
return rc;
+free_pf2vf_reply:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 955cce644392..3c5494afd3c0 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -1001,7 +1001,7 @@ qca_spi_probe(struct spi_device *spi)
return 0;
}
-static int
+static void
qca_spi_remove(struct spi_device *spi)
{
struct net_device *qcaspi_devs = spi_get_drvdata(spi);
@@ -1011,8 +1011,6 @@ qca_spi_remove(struct spi_device *spi)
unregister_netdev(qcaspi_devs);
free_netdev(qcaspi_devs);
-
- return 0;
}
static const struct spi_device_id qca_spi_id[] = {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 32161a56726c..2881f5b2b5f4 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion\n", __func__);
- return -EINVAL;
+ return 1;
}
__setup("sxgbeeth=", sxgbe_cmdline_opt);
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 16a4cbae9326..c672f92d65e9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -749,6 +749,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
+ u8 addr[ETH_ALEN];
ether3_banner();
@@ -776,7 +777,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
- ether3_addr(dev->dev_addr, ec);
+ ether3_addr(addr, ec);
+ eth_hw_addr_set(dev, addr);
priv(dev)->dev = dev;
timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index be6bfd6b7ec7..50baf62b2cbc 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
spin_lock_bh(&mcdi->iface_lock);
++mcdi->seqno;
+ seqno = mcdi->seqno & SEQ_MASK;
spin_unlock_bh(&mcdi->iface_lock);
- seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index dd6f69ced4ee..fc9cef9dcefc 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
return ret;
if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
return ret;
- }
+ }
memcpy(data, eebuf+eeprom->offset, eeprom->len);
return 0;
}
@@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev,
return ret;
/* write byte */
if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
- return ret;
+ return ret;
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
return ret;
- }
- return 0;
+ }
+ return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 617d0e4c6495..09644ab0d87a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -756,7 +756,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
if (err) {
dev_err(priv->device, "EMAC reset timeout\n");
- return -EFAULT;
+ return err;
}
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index e2e0f977875d..c3f10a92b62b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -22,21 +22,21 @@
#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
@@ -49,13 +49,15 @@ struct visconti_eth {
void __iomem *reg;
u32 phy_intf_sel;
struct clk *phy_ref_clk;
+ struct device *dev;
spinlock_t lock; /* lock to protect register update */
};
static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
{
struct visconti_eth *dwmac = priv;
- unsigned int val, clk_sel_val;
+ struct net_device *netdev = dev_get_drvdata(dwmac->dev);
+ unsigned int val, clk_sel_val = 0;
unsigned long flags;
spin_lock_irqsave(&dwmac->lock, flags);
@@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
break;
default:
/* No bit control */
- break;
+ netdev_err(netdev, "Unsupported speed request (%d)", speed);
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+ return;
}
writel(val, dwmac->reg + MAC_CTRL_REG);
@@ -96,31 +100,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
val |= ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
switch (dwmac->phy_intf_sel) {
case ETHER_CONFIG_INTF_RGMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_RMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_MII:
default:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_EN;
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
}
- /* Start clock */
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
spin_unlock_irqrestore(&dwmac->lock, flags);
}
@@ -219,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
+ dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 1914ad698cab..acd70b9a3173 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -150,6 +150,7 @@
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
+#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 40b5ed94cb54..5b195d5051d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -194,7 +194,6 @@ struct stmmac_priv {
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
- int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
@@ -229,7 +228,6 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_irq[PHY_MAX_ADDR];
struct phylink_config phylink_config;
struct phylink *phylink;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 164dff5ec32e..abfb3cd5958d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -21,10 +21,18 @@
#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
+#define GMAC4_REG_SPACE_SIZE 0x116C
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
#define XGMAC_ETHTOOL_NAME "st_xgmac"
+/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
+ *
+ * It is here because dwmac_dma.h and dwmac4_dma.h cannot be included at the
+ * same time due to the conflicting macro names.
+ */
+#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
+
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
@@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
if (priv->plat->has_xgmac)
return XGMAC_REGSIZE * 4;
+ else if (priv->plat->has_gmac4)
+ return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- if (!priv->plat->has_xgmac) {
- /* Copy DMA registers to where ethtool expects them */
+ /* Copy DMA registers to where ethtool expects them */
+ if (priv->plat->has_gmac4) {
+ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
+ NUM_DWMAC4_DMA_REGS * 4);
+ } else if (!priv->plat->has_xgmac) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 074e2cdfb0fa..a7ec9f4d46ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -145,15 +145,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
static void get_systime(void __iomem *ioaddr, u64 *systime)
{
- u64 ns;
-
- /* Get the TSSS value */
- ns = readl(ioaddr + PTP_STNSR);
- /* Get the TSS and convert sec time value to nanosecond */
- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+ u64 ns, sec0, sec1;
+
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ do {
+ sec0 = sec1;
+ /* Get the TSSS value */
+ ns = readl_relaxed(ioaddr + PTP_STNSR);
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ } while (sec0 != sec1);
if (systime)
- *systime = ns;
+ *systime = ns + (sec1 * 1000000000ULL);
}
static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
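The new read loop in get_systime() is the classic two-register consistency dance: read seconds, read nanoseconds, re-read seconds, and retry if the seconds value changed in between (a nanosecond rollover happened mid-read). A standalone sketch of the same retry pattern, with the MMIO accessors mocked as plain variables:

#include <assert.h>
#include <stdint.h>

/* Mocked device registers; in the driver these are MMIO reads. */
static uint32_t ptp_seconds;
static uint32_t ptp_nanoseconds;

static uint32_t read_sec(void)  { return ptp_seconds; }
static uint32_t read_nsec(void) { return ptp_nanoseconds; }

static uint64_t get_systime(void)
{
	uint64_t sec0, sec1, ns;

	sec1 = read_sec();
	do {
		sec0 = sec1;
		ns = read_nsec();
		sec1 = read_sec();	/* retry if seconds rolled over mid-read */
	} while (sec0 != sec1);

	return ns + sec1 * 1000000000ULL;
}

int main(void)
{
	ptp_seconds = 5;
	ptp_nanoseconds = 123456789;
	assert(get_systime() == 5ULL * 1000000000ULL + 123456789ULL);
	return 0;
}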
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6708ca2aa4f7..422e3225f476 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -402,7 +402,7 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function is to verify and enter in LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -412,13 +412,14 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -450,8 +451,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
@@ -889,6 +890,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -911,8 +915,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -2260,6 +2262,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
stmmac_stop_tx(priv, priv->ioaddr, chan);
}
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
/**
* stmmac_start_all_dma - start all RX and TX DMA channels
* @priv: driver private structure
@@ -2647,8 +2666,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -2902,8 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3238,7 +3259,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3248,7 +3269,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3305,13 +3326,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- }
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
@@ -3759,6 +3780,7 @@ static int stmmac_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -6508,8 +6530,10 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* Adjust Split header */
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6570,6 +6594,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -7250,6 +7275,10 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
@@ -7268,8 +7297,6 @@ int stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -7447,6 +7474,7 @@ int stmmac_resume(struct device *dev)
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
@@ -7463,7 +7491,7 @@ static int __init stmmac_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
if (kstrtoint(opt + 6, 0, &debug))
@@ -7494,11 +7522,11 @@ static int __init stmmac_cmdline_opt(char *str)
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion", __func__);
- return -EINVAL;
+ return 1;
}
__setup("stmmaceth=", stmmac_cmdline_opt);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0d24ebd37873..1c9f02f9c317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -297,9 +297,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
- if (priv->plat->ptp_clk_freq_config)
- priv->plat->ptp_clk_freq_config(priv);
-
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ad9029ae6848..77e5dffb558f 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
if (err) {
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
"aborting.\n");
- goto err_out_iounmap;
+ goto err_out_free_coherent;
}
pci_set_drvdata(pdev, hp);
@@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
return 0;
+err_out_free_coherent:
+ dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+ hp->happy_block, hp->hblock_dvma);
+
err_out_iounmap:
iounmap(hp->gregs);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index ba220593e6db..8f6817f346ba 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1146,7 +1146,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
- struct page_pool_params pp_params;
+ struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dc70a6bfaa6a..92ca739fac01 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- clk_enable(cpts->refclk);
+ err = clk_enable(cpts->refclk);
+ if (err)
+ return err;
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index cf0917b29e30..5251fc324221 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1091,20 +1091,22 @@ static int tsi108_get_mac(struct net_device *dev)
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+ u8 addr[ETH_ALEN];
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
if (word2 == 0 && word1 == 0) {
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x06;
- dev->dev_addr[2] = 0xd2;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x00;
+ addr[0] = 0x00;
+ addr[1] = 0x06;
+ addr[2] = 0xd2;
+ addr[3] = 0x00;
+ addr[4] = 0x00;
if (0x8 == data->phy)
- dev->dev_addr[5] = 0x01;
+ addr[5] = 0x01;
else
- dev->dev_addr[5] = 0x02;
+ addr[5] = 0x02;
+ eth_hw_addr_set(dev, addr);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1114,12 +1116,13 @@ static int tsi108_get_mac(struct net_device *dev)
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
- dev->dev_addr[0] = (word2 >> 16) & 0xff;
- dev->dev_addr[1] = (word2 >> 24) & 0xff;
- dev->dev_addr[2] = (word1 >> 0) & 0xff;
- dev->dev_addr[3] = (word1 >> 8) & 0xff;
- dev->dev_addr[4] = (word1 >> 16) & 0xff;
- dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ addr[0] = (word2 >> 16) & 0xff;
+ addr[1] = (word2 >> 24) & 0xff;
+ addr[2] = (word1 >> 0) & 0xff;
+ addr[3] = (word1 >> 8) & 0xff;
+ addr[4] = (word1 >> 16) & 0xff;
+ addr[5] = (word1 >> 24) & 0xff;
+ eth_hw_addr_set(dev, addr);
}
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -1136,14 +1139,12 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
- int i;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (i = 0; i < 6; i++)
- /* +2 is for the offset of the HW addr type */
- dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ /* +2 is for the offset of the HW addr type */
+ eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
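The tsi108 change collects the octets in a local buffer and hands them to eth_hw_addr_set() instead of writing dev->dev_addr in place. The reversed register layout it decodes can be exercised on its own; the register values below are hypothetical and chosen to reproduce the driver's 00:06:d2 default prefix:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void unpack_mac(uint32_t word1, uint32_t word2, uint8_t addr[ETH_ALEN])
{
	addr[0] = (word2 >> 16) & 0xff;
	addr[1] = (word2 >> 24) & 0xff;
	addr[2] = (word1 >>  0) & 0xff;
	addr[3] = (word1 >>  8) & 0xff;
	addr[4] = (word1 >> 16) & 0xff;
	addr[5] = (word1 >> 24) & 0xff;
}

int main(void)
{
	/* hypothetical TSI108_MAC_ADDR1/ADDR2 contents */
	uint32_t word1 = 0x010000d2, word2 = 0x06000000;
	uint8_t addr[ETH_ALEN];

	unpack_mac(word1, word2, addr);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}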
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89a31783fbb4..25739b182ac7 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -731,7 +731,7 @@ static int mse102x_probe_spi(struct spi_device *spi)
return 0;
}
-static int mse102x_remove_spi(struct spi_device *spi)
+static void mse102x_remove_spi(struct spi_device *spi)
{
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
@@ -741,8 +741,6 @@ static int mse102x_remove_spi(struct spi_device *spi)
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
-
- return 0;
}
static const struct of_device_id mse102x_match_table[] = {
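The mse102x hunk above is one of many in this section converting spi_driver remove() callbacks from int to void; the SPI core never acted on the returned status, so the conversion is mechanical. A reduced sketch of the shape of that change, with stub types rather than the real SPI structures:

#include <stdio.h>

struct spi_device { const char *name; };

struct spi_driver {
	void (*remove)(struct spi_device *spi);	/* was: int (*remove)(...) */
};

static void demo_remove(struct spi_device *spi)
{
	/* tear-down only; there is nothing useful to return */
	printf("removing %s\n", spi->name);
}

int main(void)
{
	struct spi_device dev = { .name = "demo-spi" };
	struct spi_driver drv = { .remove = demo_remove };

	drv.remove(&dev);	/* the caller ignored any status anyway */
	return 0;
}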
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 7779a36da3c8..7c52796273a4 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -461,11 +461,9 @@ static int w5100_spi_probe(struct spi_device *spi)
return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
}
-static int w5100_spi_remove(struct spi_device *spi)
+static void w5100_spi_remove(struct spi_device *spi)
{
w5100_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b900ab5aef2a..64c7e26c3b75 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev)
lp->indirect_lock = devm_kmalloc(&pdev->dev,
sizeof(*lp->indirect_lock),
GFP_KERNEL);
+ if (!lp->indirect_lock)
+ return -ENOMEM;
spin_lock_init(lp->indirect_lock);
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 519599480b15..77fa2cb03aca 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
- goto error;
+ goto put_node;
}
dev_info(dev,
@@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
(unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
return 0;
+put_node:
+ of_node_put(lp->phy_node);
error:
free_netdev(ndev);
return rc;
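Both Xilinx fixes above tighten the probe error path: ll_temac gains the missing -ENOMEM check after devm_kmalloc(), and emaclite drops the phy_node reference it took before freeing the netdev. A generic stand-alone sketch of that unwind shape, with stand-in resources instead of kernel objects:

#include <stdio.h>
#include <stdlib.h>

struct node_ref { int refs; };

static void node_put(struct node_ref *n)
{
	if (n)
		n->refs--;		/* analogous to of_node_put() */
}

static int fake_probe(struct node_ref *phy_node, int register_fails)
{
	char *netdev = malloc(64);	/* stands in for alloc_etherdev() */
	int rc;

	if (!netdev)
		return -12;		/* the -ENOMEM check ll_temac was missing */

	phy_node->refs++;		/* reference taken during setup */

	if (register_fails) {		/* e.g. register_netdev() failing */
		rc = -5;
		goto put_node;
	}
	printf("probe ok, refs=%d\n", phy_node->refs);
	return 0;			/* resources stay live after a good probe */

put_node:
	node_put(phy_node);		/* the step the emaclite fix adds */
	free(netdev);			/* analogous to free_netdev() */
	return rc;
}

int main(void)
{
	struct node_ref phy = { .refs = 1 };

	fake_probe(&phy, 1);
	printf("after failed probe, refs=%d\n", phy.refs);	/* back to 1 */
	return 0;
}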
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index b1fc153125d9..45c3c4a1101b 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty)
*/
netif_stop_queue(sp->dev);
+ unregister_netdev(sp->dev);
+
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- unregister_netdev(sp->dev);
-
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6376b8485976..980f2be32f05 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __
ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
- if (ym->cmd != SIOCYAMSMCS)
- return -EINVAL;
- if (ym->bitrate > YAM_MAXBITRATE) {
+ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3646469433b1..fde1c492ca02 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1587,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 7db9cbd0f5de..6afdf1622944 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1304,7 +1304,7 @@ err_alloc_wq:
return ret;
}
-static int adf7242_remove(struct spi_device *spi)
+static void adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
@@ -1316,8 +1316,6 @@ static int adf7242_remove(struct spi_device *spi)
ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
static const struct of_device_id adf7242_of_match[] = {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 7d67f41387f5..549d04b5f3d4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -100,6 +100,7 @@ struct at86rf230_local {
unsigned long cal_timeout;
bool is_tx;
bool is_tx_from_off;
+ bool was_tx;
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
if (ctx->free)
kfree(ctx);
- ieee802154_wake_queue(lp->hw);
+ if (lp->was_tx) {
+ lp->was_tx = 0;
+ dev_kfree_skb_any(lp->tx_skb);
+ ieee802154_wake_queue(lp->hw);
+ }
}
static void
@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
+ if (lp->is_tx) {
+ lp->was_tx = 1;
+ lp->is_tx = 0;
+ }
+
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_async_error_recover_complete);
}
@@ -1759,7 +1768,7 @@ free_dev:
return rc;
}
-static int at86rf230_remove(struct spi_device *spi)
+static void at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -1769,8 +1778,6 @@ static int at86rf230_remove(struct spi_device *spi)
ieee802154_free_hw(lp->hw);
at86rf230_debugfs_remove();
dev_dbg(&spi->dev, "unregistered at86rf230\n");
-
- return 0;
}
static const struct of_device_id at86rf230_of_match[] = {
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index ece6ff6049f6..187cbc634ce8 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -831,7 +831,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
finish:;
}
-static int ca8210_remove(struct spi_device *spi_device);
+static void ca8210_remove(struct spi_device *spi_device);
/**
* ca8210_spi_transfer_complete() - Called when a single spi transfer has
@@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete(
status
);
if (status != MAC_TRANSACTION_OVERFLOW) {
+ dev_kfree_skb_any(priv->tx_skb);
ieee802154_wake_queue(priv->hw);
return 0;
}
@@ -2974,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
@@ -3048,7 +3049,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
*
* Return: 0 or linux error code
*/
-static int ca8210_remove(struct spi_device *spi_device)
+static void ca8210_remove(struct spi_device *spi_device)
{
struct ca8210_priv *priv;
struct ca8210_platform_data *pdata;
@@ -3088,8 +3089,6 @@ static int ca8210_remove(struct spi_device *spi_device)
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS))
ca8210_test_interface_clear(priv);
}
-
- return 0;
}
/**
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 89c046b204e0..1e1f40f628a0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -1213,7 +1213,7 @@ err_hw_init:
return ret;
}
-static int cc2520_remove(struct spi_device *spi)
+static void cc2520_remove(struct spi_device *spi)
{
struct cc2520_private *priv = spi_get_drvdata(spi);
@@ -1222,8 +1222,6 @@ static int cc2520_remove(struct spi_device *spi)
ieee802154_unregister_hw(priv->hw);
ieee802154_free_hw(priv->hw);
-
- return 0;
}
static const struct spi_device_id cc2520_ids[] = {
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8caa61ec718f..36f1c5aa98fc 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
goto err_pib;
}
+ pib->channel = 13;
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 8dc04e2590b1..c927a5ae0d05 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
dev_dbg(printdev(lp), "%s\n", __func__);
phy->symbol_duration = 16;
- phy->lifs_period = 40;
- phy->sifs_period = 12;
+ phy->lifs_period = 40 * phy->symbol_duration;
+ phy->sifs_period = 12 * phy->symbol_duration;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
IEEE802154_HW_AFILT |
@@ -1335,7 +1335,7 @@ free_dev:
return ret;
}
-static int mcr20a_remove(struct spi_device *spi)
+static void mcr20a_remove(struct spi_device *spi)
{
struct mcr20a_local *lp = spi_get_drvdata(spi);
@@ -1343,8 +1343,6 @@ static int mcr20a_remove(struct spi_device *spi)
ieee802154_unregister_hw(lp->hw);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
static const struct of_device_id mcr20a_of_match[] = {
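The ca8210 and mcr20a hunks above store the inter-frame spacings in the same unit as the rest of the PHY parameters: IEEE 802.15.4 defines LIFS and SIFS as 40 and 12 symbol periods, so with the 16 us symbol duration both drivers already set, the stored values become microseconds. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int symbol_duration = 16;	/* microseconds per symbol, 2.4 GHz O-QPSK */
	unsigned int lifs = 40 * symbol_duration;
	unsigned int sifs = 12 * symbol_duration;

	printf("LIFS = %u us, SIFS = %u us\n", lifs, sifs);	/* 640, 192 */
	return 0;
}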
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ff83e00b77af..ee4cfbf2c5cc 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -1356,7 +1356,7 @@ err_ret:
return ret;
}
-static int mrf24j40_remove(struct spi_device *spi)
+static void mrf24j40_remove(struct spi_device *spi)
{
struct mrf24j40 *devrec = spi_get_drvdata(spi);
@@ -1366,8 +1366,6 @@ static int mrf24j40_remove(struct spi_device *spi)
ieee802154_free_hw(devrec->hw);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
-
- return 0;
}
static const struct of_device_id mrf24j40_of_match[] = {
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index d037682fb7ad..6782c2cbf542 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -2,7 +2,9 @@ config QCOM_IPA
tristate "Qualcomm IPA support"
depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index b1c6c0fcb654..f2989aac47a6 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include "linux/soc/qcom/qcom_aoss.h"
+
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -64,6 +66,7 @@ enum ipa_power_flag {
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,6 +75,7 @@ enum ipa_power_flag {
struct ipa_power {
struct device *dev;
struct clk *core;
+ struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
@@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
+
+ ret = qmp_send(power->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
+
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
if (ret)
goto err_kfree;
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
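The buf[36] sizing in ipa_power_retention() above is tighter than it looks: the template is 36 characters, and substituting the single '%c' leaves a 35-character message plus its NUL terminator, which fits the buffer exactly. A stand-alone check of that arithmetic (plain C, outside the driver):

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	char buf[36];
	int n;

	n = snprintf(buf, sizeof(buf), fmt, '1');
	printf("template %zu chars, formatted %d chars: \"%s\"\n",
	       strlen(fmt), n, buf);		/* 36 and 35; no truncation */
	return 0;
}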
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 2151805d7fbb..6f84f057a209 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 856e55a080a7..fe11910518d9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
+#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
if (!ipa->uc_powered)
return;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 16aa3a478e9e..3d0874331763 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
macsec_del_dev(macsec);
@@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- /* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
- const struct macsec_ops *ops;
- struct macsec_context ctx;
-
- ops = macsec_get_ops(netdev_priv(dev), &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_del_secy, &ctx);
- }
- }
-
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -4018,6 +4018,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
!macsec_check_offload(macsec->offload, macsec))
return -EOPNOTSUPP;
+ /* send_sci must be set to true when transmit sci explicitly is set */
+ if ((data && data[IFLA_MACSEC_SCI]) &&
+ (data && data[IFLA_MACSEC_INC_SCI])) {
+ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+ if (!send_sci)
+ return -EINVAL;
+ }
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index eaa6fb3224bc..62723a7faa2d 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 966c3b4ad59d..e2273588c75b 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 5f4cd24a0241..4eba5a91075c 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
if (ret)
return ret;
- return clk_prepare_enable(priv->mdio_clk);
+ ret = clk_prepare_enable(priv->mdio_clk);
+ if (ret == 0)
+ mdelay(10);
+
+ return ret;
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 7d2abaf2b2c9..64fb76c1e395 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -187,6 +187,13 @@ static const struct regmap_config mscc_miim_regmap_config = {
.reg_stride = 4,
};
+static const struct regmap_config mscc_miim_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "phy",
+};
+
int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
struct regmap *mii_regmap, int status_offset)
{
@@ -250,7 +257,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
}
phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
- &mscc_miim_regmap_config);
+ &mscc_miim_phy_regmap_config);
if (IS_ERR(phy_regmap)) {
dev_err(&pdev->dev, "Unable to create phy register regmap\n");
return PTR_ERR(phy_regmap);
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 4300261e2f9e..378ee779061c 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5b6c0d120e09..29aa811af430 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1688,19 +1688,19 @@ static int qca808x_read_status(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->link && phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SMII;
-
- /* generate seed as a lower random value to make PHY linked as SLAVE easily,
- * except for master/slave configuration fault detected.
- * the reason for not putting this code into the function link_change_notify is
- * the corner case where the link partner is also the qca8081 PHY and the seed
- * value is configured as the same value, the link can't be up and no link change
- * occurs.
- */
- if (!phydev->link) {
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* generate seed as a lower random value to make PHY linked as SLAVE easily,
+ * except for master/slave configuration fault detected.
+ * the reason for not putting this code into the function link_change_notify is
+ * the corner case where the link partner is also the qca8081 PHY and the seed
+ * value is configured as the same value, the link can't be up and no link change
+ * occurs.
+ */
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index bb5104ae4610..3c683e0e40e9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -854,6 +854,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
+ .soft_reset = genphy_soft_reset,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 211b5476a6f5..ce17b2af3218 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_DP83822_MISR1, 0);
+ err = phy_write(phydev, MII_DP83822_MISR2, 0);
if (err < 0)
return err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index fa71fb7a66b5..2702faf7b0f6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -553,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
else
mscr = 0;
- return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1121_PHY_MSCR_REG,
- MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+ return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -569,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
+ changed = err;
+
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- changed = err;
+ changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
@@ -1213,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- err = genphy_config_aneg(phydev);
- return 0;
+ return genphy_soft_reset(phydev);
}
static int m88e1118_config_init(struct phy_device *phydev)
@@ -1686,8 +1687,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1721,8 +1722,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index b7a5ae20edd5..68ee434f9dea 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 7e7904fee1d9..73f7962a37d3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -30,8 +30,12 @@
#define INTSRC_LINK_DOWN BIT(4)
#define INTSRC_REMOTE_FAULT BIT(5)
#define INTSRC_ANEG_COMPLETE BIT(6)
+#define INTSRC_ENERGY_DETECT BIT(7)
#define INTSRC_MASK 30
+#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
+ INTSRC_ENERGY_DETECT)
+
#define BANK_ANALOG_DSP 0
#define BANK_WOL 1
#define BANK_BIST 3
@@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
- u16 val;
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
if (ret)
return ret;
- val = INTSRC_ANEG_PR
- | INTSRC_PARALLEL_FAULT
- | INTSRC_ANEG_LP_ACK
- | INTSRC_LINK_DOWN
- | INTSRC_REMOTE_FAULT
- | INTSRC_ANEG_COMPLETE;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES);
} else {
- val = 0;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, 0);
/* Ack any pending IRQ */
ret = meson_gxl_ack_interrupt(phydev);
@@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
+ irq_status &= INT_SOURCES;
+
if (irq_status == 0)
return IRQ_NONE;
- phy_trigger_machine(phydev);
+ /* Aneg-complete interrupt is used for link-up detection */
+ if (phydev->autoneg == AUTONEG_ENABLE &&
+ irq_status == INTSRC_ENERGY_DETECT)
+ return IRQ_HANDLED;
+
+ /* Give PHY some time before MAC starts sending data. This works
+ * around an issue where network doesn't come up properly.
+ */
+ if (!(irq_status & INTSRC_LINK_DOWN))
+ phy_queue_state_machine(phydev, msecs_to_jiffies(100));
+ else
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
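The meson-gxl handler above now masks the raw status against the sources it actually enabled and treats an energy-detect-only status as already handled. A reduced plain-C model of that filtering, with a hypothetical raw register value:

#include <stdio.h>
#include <stdint.h>

#define INTSRC_LINK_DOWN	(1u << 4)
#define INTSRC_ANEG_COMPLETE	(1u << 6)
#define INTSRC_ENERGY_DETECT	(1u << 7)
#define INT_SOURCES		(INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
				 INTSRC_ENERGY_DETECT)

int main(void)
{
	uint32_t irq_status = 0x83;	/* hypothetical raw status read */

	irq_status &= INT_SOURCES;	/* drop sources that were never enabled */
	if (!irq_status)
		printf("spurious interrupt\n");
	else if (irq_status == INTSRC_ENERGY_DETECT)
		printf("energy detect only, nothing more to do\n");
	else
		printf("run the PHY state machine (status 0x%02x)\n", irq_status);
	return 0;
}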
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index ebfeeb3c67c1..7e3017e7a1c0 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
MODULE_AUTHOR("Nagaraju Lakkaraju");
MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
+MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74d8e1dc125f..ce0bb5951b81 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1746,6 +1746,9 @@ void phy_detach(struct phy_device *phydev)
phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
/*
* The phydev might go away on the put_device() below, so avoid
* a use-after-free bug by reading the underlying bus first.
@@ -1757,9 +1760,6 @@ void phy_detach(struct phy_device *phydev)
ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
-
- /* Assert the reset signal */
- phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 0c6c0d1843bc..c1512c9925a6 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -651,6 +651,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
else if (ret < 0)
return ERR_PTR(ret);
+ if (!fwnode_device_is_available(ref.fwnode)) {
+ fwnode_handle_put(ref.fwnode);
+ return NULL;
+ }
+
bus = sfp_bus_get(ref.fwnode);
fwnode_handle_put(ref.fwnode);
if (!bus)
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 8b5445a724ce..ff37f8ba6758 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -517,7 +517,7 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
}
-static int ks8995_remove(struct spi_device *spi)
+static void ks8995_remove(struct spi_device *spi)
{
struct ks8995_switch *ks = spi_get_drvdata(spi);
@@ -526,8 +526,6 @@ static int ks8995_remove(struct spi_device *spi)
/* assert reset */
if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
-
- return 0;
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1a627ba4b850..a31098981a65 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
- /* This check is no longer done by usbnet */
- if (skb->len < dev->net->hard_header_len)
+ /* At the end of the SKB, there's a header telling us how many packets
+ * are bundled into this buffer and where we can find an array of
+ * per-packet metadata (which contains elements encoded into u16).
+ */
+ if (skb->len < 4)
return 0;
-
skb_trim(skb, skb->len - 4);
rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
+
+ if (pkt_cnt == 0)
+ return 0;
+
+ /* Make sure that the bounds of the metadata array are inside the SKB
+ * (and in front of the counter at the end).
+ */
+ if (pkt_cnt * 2 + hdr_off > skb->len)
+ return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
- while (pkt_cnt--) {
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, hdr_off);
+
+ for (; ; pkt_cnt--, pkt_hdr++) {
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
- /* Check CRC or runt packet */
- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
- continue;
- }
-
- if (pkt_cnt == 0) {
- skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(skb, 2);
- skb_set_tail_pointer(skb, skb->len);
- skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(skb, pkt_hdr);
- return 1;
- }
+ if (pkt_len > skb->len)
+ return 0;
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
+ /* Check CRC or runt packet */
+ if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+ pkt_len >= 2 + ETH_HLEN) {
+ bool last = (pkt_cnt == 0);
+
+ if (last) {
+ ax_skb = skb;
+ } else {
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
+ return 0;
+ }
ax_skb->len = pkt_len;
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
+
+ if (last)
+ return 1;
+
usbnet_skb_return(dev, ax_skb);
- } else {
- return 0;
}
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ /* Trim this packet away from the SKB */
+ if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ return 0;
}
- return 1;
}
static struct sk_buff *
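The trailer layout that the rewritten rx_fixup() above walks can be exercised in isolation. The sketch below builds a synthetic 64-byte buffer and applies bounds checks in the spirit of the patch (slightly stricter, and not the driver's code): the last 32-bit word holds the packet count and the offset of the metadata array, and bits 16..28 of each metadata word give that packet's length.

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void walk(const uint8_t *buf, size_t len)
{
	unsigned int pkt_cnt, hdr_off, off = 0;
	uint32_t trailer;

	if (len < 4)
		return;
	trailer = get_le32(buf + len - 4);	/* last 4 bytes of the buffer */
	pkt_cnt = trailer & 0xffff;
	hdr_off = trailer >> 16;

	/* the metadata array must fit in front of the trailer */
	if (!pkt_cnt || hdr_off + 4u * pkt_cnt > len - 4)
		return;

	for (unsigned int i = 0; i < pkt_cnt; i++) {
		uint32_t meta = get_le32(buf + hdr_off + 4 * i);
		unsigned int pkt_len = (meta >> 16) & 0x1fff;

		if (off + pkt_len > hdr_off)	/* packets may not overlap metadata */
			return;
		printf("packet %u: offset %u, length %u\n", i, off, pkt_len);
		off += (pkt_len + 7) & ~7u;	/* frames are padded to 8 bytes */
	}
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	put_le32(buf + 48, 20u << 16);		/* metadata: packet 0 is 20 bytes */
	put_le32(buf + 52, 16u << 16);		/* metadata: packet 1 is 16 bytes */
	put_le32(buf + 60, (48u << 16) | 2);	/* trailer: hdr_off = 48, pkt_cnt = 2 */
	walk(buf, sizeof(buf));
	return 0;
}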
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eb3817d70f2b..9b4dfa3001d6 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
/* reported with some C860 units */
.idProduct = 0x9050, /* C-860 */
ZAURUS_MASTER_INTERFACE,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 82bb5ed94c48..c0b8b4aa78f3 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e303b522efb5..15f91d691bba 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+ /* sanity checking - watch out for integer wrap*/
+ /* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index cd33955df0b6..6a769df0b421 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
iphone->rx_urb->transfer_dma);
usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
usb_fill_bulk_urb(dev->rx_urb, udev,
usb_rcvbulkpipe(udev, dev->bulk_in),
- dev->rx_buf, IPHETH_BUF_SIZE,
+ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
ipheth_rcvbulk_callback,
dev);
dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b8e20a3f2b84..415f16662f88 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1537,11 +1537,8 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
- if (dev->domain_data.phyirq > 0) {
- local_irq_disable();
- generic_handle_irq(dev->domain_data.phyirq);
- local_irq_enable();
- }
+ if (dev->domain_data.phyirq > 0)
+ generic_handle_irq_safe(dev->domain_data.phyirq);
} else {
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 37e5f3495362..3353e761016d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1400,6 +1400,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc1e3dd67c04..a0f29482294d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
- netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
- index, ret);
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ index, ret);
return ret;
}
@@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (ret < 0 && ret != -ENODEV)
netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
index, ret);
@@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
do {
ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
if (ret < 0) {
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
netdev_warn(dev->net, "Error reading MII_ACCESS\n");
return ret;
}
@@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
@@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error reading MII_DATA\n");
goto done;
}
@@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
done:
mutex_unlock(&dev->phy_mutex);
+
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
return ret;
}
@@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
val = regval;
ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_DATA\n");
goto done;
}
@@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index b658510cc9a4..5a53e63d33a6 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
- if (len > ETH_FRAME_LEN)
+ if (len > ETH_FRAME_LEN || len > skb->len)
return 0;
/* the last packet of current skb */
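Both the cdc_ncm and sr9700 fixes above distrust length fields taken from the device; the cdc_ncm one additionally rewrites the bound so that, with offset and len now unsigned, the addition cannot wrap past the buffer length. A tiny stand-alone comparison of the two forms of the check:

#include <stdio.h>
#include <stdint.h>

static int bad_check(uint32_t offset, uint32_t len, uint32_t skb_len)
{
	return offset + len > skb_len;		/* wraps for a huge len */
}

static int good_check(uint32_t offset, uint32_t len, uint32_t skb_len)
{
	return offset > skb_len || len > skb_len - offset;
}

int main(void)
{
	uint32_t skb_len = 1514, offset = 100, len = UINT32_MAX - 50;

	printf("bad:  rejected=%d\n", bad_check(offset, len, skb_len));	/* 0: slips through */
	printf("good: rejected=%d\n", good_check(offset, len, skb_len));	/* 1: caught */
	return 0;
}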
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 8e717a0b559b..7984f2157d22 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -315,6 +320,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
/* reported with some C860 units */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 354a963075c5..d29fb9759cc9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
@@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
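The two veth hunks above claim the NAPI instance with napi_schedule_prep() before publishing rx_notify_masked, so the flag can no longer be left set without a poll actually pending. A reduced model of that "claim first, then publish" ordering, using C11 atomics rather than the real NAPI API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_napi {
	atomic_bool scheduled;		/* stands in for NAPI_STATE_SCHED */
};

static bool fake_napi_schedule_prep(struct fake_napi *n)
{
	/* true only for the caller that actually claimed the instance */
	return !atomic_exchange(&n->scheduled, true);
}

int main(void)
{
	struct fake_napi napi = { .scheduled = false };
	atomic_bool rx_notify_masked = false;

	/* producer path: only set the flag if we really scheduled a poll */
	if (!atomic_load(&rx_notify_masked) && fake_napi_schedule_prep(&napi)) {
		atomic_store(&rx_notify_masked, true);
		printf("scheduled poll\n");
	}

	/* a racing producer would lose the exchange above and do nothing */
	printf("claimed=%d masked=%d\n",
	       (int)atomic_load(&napi.scheduled),
	       (int)atomic_load(&rx_notify_masked));
	return 0;
}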
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 8e3b1c717c10..6063552cea9b 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -194,10 +194,9 @@ static int slic_ds26522_init_configure(struct spi_device *spi)
return 0;
}
-static int slic_ds26522_remove(struct spi_device *spi)
+static void slic_ds26522_remove(struct spi_device *spi)
{
pr_info("DS26522 module uninstalled\n");
- return 0;
}
static int slic_ds26522_probe(struct spi_device *spi)
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a46067c38bf5..0fad1331303c 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -59,9 +59,7 @@ out:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
- void *data)
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
struct wg_device *wg;
struct wg_peer *peer;
@@ -92,7 +90,24 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
-#endif
+
+static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_noise_expire_current_peer_keypairs(peer);
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };
static int wg_stop(struct net_device *dev)
{
@@ -424,16 +439,18 @@ int __init wg_device_init(void)
{
int ret;
-#ifdef CONFIG_PM_SLEEP
ret = register_pm_notifier(&pm_notifier);
if (ret)
return ret;
-#endif
- ret = register_pernet_device(&pernet_ops);
+ ret = register_random_vmfork_notifier(&vm_notifier);
if (ret)
goto error_pm;
+ ret = register_pernet_device(&pernet_ops);
+ if (ret)
+ goto error_vm;
+
ret = rtnl_link_register(&link_ops);
if (ret)
goto error_pernet;
@@ -442,10 +459,10 @@ int __init wg_device_init(void)
error_pernet:
unregister_pernet_device(&pernet_ops);
+error_vm:
+ unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
-#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
-#endif
return ret;
}
@@ -453,8 +470,7 @@ void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
unregister_pernet_device(&pernet_ops);
-#ifdef CONFIG_PM_SLEEP
+ unregister_random_vmfork_notifier(&vm_notifier);
unregister_pm_notifier(&pm_notifier);
-#endif
rcu_barrier();
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 62c453a21e49..7c1c2658cb5f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) {
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
- enum cfg80211_bss_frame_type ftype;
- u8 *ies;
- int ies_ch;
-
+ ieee80211_is_probe_resp(hdr->frame_control))
status->boottime_ns = ktime_get_boottime_ns();
- if (!ar->scan_channel)
- goto drop;
-
- ies = mgmt->u.beacon.variable;
-
- if (ieee80211_is_beacon(mgmt->frame_control))
- ftype = CFG80211_BSS_FTYPE_BEACON;
- else
- ftype = CFG80211_BSS_FTYPE_PRESP;
-
- ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
- skb_tail_pointer(skb) - ies,
- sband->band, ftype);
-
- if (ies_ch > 0 && ies_ch != channel) {
- ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "channel mismatched ds channel %d scan channel %d\n",
- ies_ch, channel);
- goto drop;
- }
- }
-
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ieee80211_rx_ni(ar->hw, skb);
return 0;
-
-drop:
- dev_kfree_skb(skb);
- return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 0eb13e5df517..d99140960a82 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_type)
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_type);
if (alt_path) {
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index 1364b0014488..208e73a16051 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLMEI) += iwlwifi/
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c21c0c68849a..85e704283755 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 790c96df58cb..c17ab53fcd8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0703e41403a6..35b8856e511f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,6 @@ enum iwl_legacy_cmds {
DEBUG_LOG_MSG = 0xf7,
/**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
- /**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index dd62a63956b3..e44c70b7c790 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 173a6991587b..4a7723eb8c1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e4ebda64cd52..efc6540d31af 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a21c3befd93b..a835214611ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
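
One detail worth calling out in the fw/rs.c hunk above: the old iwl_legacy_rate_to_fw_idx() signalled "not found" by returning -1 from a function whose return type is u32, so the caller's WARN_ON(legacy_rate < 0) could never fire, because an unsigned value is never negative. The rewrite returns the in-range sentinel IWL_RATE_INVALID and falls back to a sane legacy rate instead. The standalone userspace sketch below only illustrates that general pitfall; the DEMO_* names and values are invented for the example and are not the driver's.

#include <stdio.h>

#define DEMO_RATE_INVALID 0xffU	/* stand-in sentinel; the driver defines its own */

static unsigned int demo_lookup(unsigned int plcp)
{
	(void)plcp;
	return (unsigned int)-1;	/* old style: "-1 means not found" from an unsigned helper */
}

int main(void)
{
	unsigned int idx = demo_lookup(0x0b);

	if (idx < 0)			/* always false: idx is unsigned, so this check is dead */
		printf("never reached\n");

	/* new style: an in-range named sentinel plus an explicit fallback */
	idx = DEMO_RATE_INVALID;
	if (idx == DEMO_RATE_INVALID)
		idx = 0;		/* e.g. fall back to the first legacy rate */
	printf("idx = %u\n", idx);
	return 0;
}
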
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index f90d4662c164..8e10ba88afb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -329,6 +329,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 83e3b731ad29..6651e78b39ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index dd58c8f9aa11..04addf964d83 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_REQ,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index d9733aaf6f6e..2f7f0f994ca3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
@@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
@@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
@@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
@@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
@@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
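
The mei/main.c change above replaces a fixed 20 ms sleep with a bounded retry loop around iwl_mei_alloc_shared_mem(), since the CSME firmware may need noticeably longer to boot its WLAN client after resume. A minimal userspace sketch of the same bounded-retry shape is below; try_alloc(), the upward attempt counting and the 100 ms nanosleep() delay are illustrative assumptions, not the driver's code.

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define MAX_RETRIES 3

static int try_alloc(int attempt)
{
	/* pretend the resource only becomes available on the last attempt */
	return attempt == MAX_RETRIES ? 0 : -ENOMEM;
}

int main(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	int attempt, ret = -ENOMEM;

	for (attempt = 1; attempt <= MAX_RETRIES; attempt++) {
		ret = try_alloc(attempt);
		if (!ret)
			break;
		fprintf(stderr, "attempt %d/%d failed (%d), retrying\n",
			attempt, MAX_RETRIES, ret);
		nanosleep(&delay, NULL);	/* give the other side time to come up */
	}

	if (ret)
		fprintf(stderr, "giving up after %d attempts: %d\n", MAX_RETRIES, ret);
	else
		printf("succeeded on attempt %d\n", attempt);
	return ret ? 1 : 0;
}
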
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 5f966af69720..468102a95e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index fb4920b01dbb..445c94adb076 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/vmalloc.h>
+#include <linux/err.h>
#include <linux/ieee80211.h>
#include <linux/netdevice.h>
@@ -1369,189 +1370,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1881,11 +1699,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
@@ -2045,7 +1858,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
- char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
@@ -2142,6 +1939,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits which removes the target.)
*/
- snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
- debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
+ if (!IS_ERR(mvm->debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+ buf);
+ }
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6f4690e56a46..ae589b3b8c46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 65f4fe3ef504..709a3df57b10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
@@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
- [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
@@ -693,11 +619,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -1853,162 +1774,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -2520,7 +2285,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2570,11 +2334,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1dcbb0eb63c3..d78f40730594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -884,17 +884,6 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
@@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 87630d38dc52..1f8b97995b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6fa2c12f7955..9213f8518f10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 78450366312b..080a1587caa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_mei_get_ownership(mvm);
+ ret = iwl_mvm_mei_get_ownership(mvm);
mutex_unlock(&mvm->mutex);
- return 0;
+ return ret;
}
static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0febdcacbd42..94f40c4d2421 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a63386a01232..ef14584fc0a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ab0fe8565851..f99b7ba69fc3 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -669,7 +669,7 @@ err_free:
return ret;
}
-static int p54spi_remove(struct spi_device *spi)
+static void p54spi_remove(struct spi_device *spi)
{
struct p54s_priv *priv = spi_get_drvdata(spi);
@@ -684,8 +684,6 @@ static int p54spi_remove(struct spi_device *spi)
mutex_destroy(&priv->mutex);
p54_free_common(priv->hw);
-
- return 0;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8d54f9face2f..fc5725f6daee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
@@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index cd9f8ecf171f..ff1c7ec8c450 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1195,7 +1195,7 @@ out:
return err;
}
-static int libertas_spi_remove(struct spi_device *spi)
+static void libertas_spi_remove(struct spi_device *spi)
{
struct if_spi_card *card = spi_get_drvdata(spi);
struct lbs_private *priv = card->priv;
@@ -1212,8 +1212,6 @@ static int libertas_spi_remove(struct spi_device *spi)
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
-
- return 0;
}
static int if_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 2c2ed4b09efd..d2db52289399 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -240,7 +240,7 @@ free:
return ret;
}
-static int wilc_bus_remove(struct spi_device *spi)
+static void wilc_bus_remove(struct spi_device *spi)
{
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -248,8 +248,6 @@ static int wilc_bus_remove(struct spi_device *spi)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
-
- return 0;
}
static const struct of_device_id wilc_of_match[] = {
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 271ed2ce2d7f..fe0d220da44d 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -423,7 +423,7 @@ static int cw1200_spi_probe(struct spi_device *func)
}
/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int cw1200_spi_disconnect(struct spi_device *func)
+static void cw1200_spi_disconnect(struct spi_device *func)
{
struct hwbus_priv *self = spi_get_drvdata(func);
@@ -435,8 +435,6 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
}
cw1200_spi_off(dev_get_platdata(&func->dev));
-
- return 0;
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 5b894bd6237e..9df38726e8b0 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -327,14 +327,12 @@ out_free:
return ret;
}
-static int wl1251_spi_remove(struct spi_device *spi)
+static void wl1251_spi_remove(struct spi_device *spi)
{
struct wl1251 *wl = spi_get_drvdata(spi);
wl1251_free_hw(wl);
regulator_disable(wl->vio);
-
- return 0;
}
static struct spi_driver wl1251_spi_driver = {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 354a7e1c3315..7eae1ec2eb2b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -546,13 +546,11 @@ out_dev_put:
return ret;
}
-static int wl1271_remove(struct spi_device *spi)
+static void wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
platform_device_unregister(glue->core);
-
- return 0;
}
static struct spi_driver wl1271_spi_driver = {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d24b7a7993aa..990360d75cb6 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -824,15 +824,11 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- NULL, hotplug_status_changed,
- "%s/%s", dev->nodename,
- "hotplug-status");
- if (err)
- goto err;
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+ hotplug_status_changed,
+ "%s/%s", dev->nodename, "hotplug-status");
+ if (!err)
be->have_hotplug_status_watch = 1;
- }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b18246ad999..daa4e6106aac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
- if (unlikely(gnttab_query_foreign_access(
- queue->grant_tx_ref[id]) != 0)) {
+ if (unlikely(!gnttab_end_foreign_access_ref(
+ queue->grant_tx_ref[id], GNTMAP_readonly))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
- gnttab_end_foreign_access_ref(
- queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
@@ -842,6 +840,28 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_destroy_queues(np);
+}
+
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;
@@ -968,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct device *dev = &queue->info->netdev->dev;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
- unsigned long ret;
int slots = 1;
int err = 0;
u32 verdict;
@@ -1010,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
- ret = gnttab_end_foreign_access_ref(ref, 0);
- BUG_ON(!ret);
+ if (!gnttab_end_foreign_access_ref(ref, 0)) {
+ dev_alert(dev,
+ "Grant still in use by backend domain\n");
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
+ return -EINVAL;
+ }
gnttab_release_grant_reference(&queue->gref_rx_head, ref);
@@ -1232,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
&need_xdp_flush);
if (unlikely(err)) {
+ if (queue->info->broken) {
+ spin_unlock(&queue->rx_lock);
+ return 0;
+ }
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
@@ -1611,6 +1639,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
static const struct net_device_ops xennet_netdev_ops = {
+ .ndo_uninit = xennet_uninit,
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
@@ -1895,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev,
struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
- struct xen_netif_rx_sring *rxs;
+ struct xen_netif_rx_sring *rxs = NULL;
grant_ref_t gref;
int err;
@@ -1915,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev,
err = xenbus_grant_ring(dev, txs, 1, &gref);
if (err < 0)
- goto grant_tx_ring_fail;
+ goto fail;
queue->tx_ring_ref = gref;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err, "allocating rx ring page");
- goto alloc_rx_ring_fail;
+ goto fail;
}
SHARED_RING_INIT(rxs);
FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
err = xenbus_grant_ring(dev, rxs, 1, &gref);
if (err < 0)
- goto grant_rx_ring_fail;
+ goto fail;
queue->rx_ring_ref = gref;
if (feature_split_evtchn)
@@ -1942,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev,
err = setup_netfront_single(queue);
if (err)
- goto alloc_evtchn_fail;
+ goto fail;
return 0;
/* If we fail to setup netfront, it is safe to just revoke access to
* granted pages because backend is not accessing it at this point.
*/
-alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
- free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
- free_page((unsigned long)txs);
-fail:
+ fail:
+ if (queue->rx_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(queue->rx_ring_ref, 0,
+ (unsigned long)rxs);
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ } else {
+ free_page((unsigned long)rxs);
+ }
+ if (queue->tx_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(queue->tx_ring_ref, 0,
+ (unsigned long)txs);
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ } else {
+ free_page((unsigned long)txs);
+ }
return err;
}
@@ -2103,22 +2138,6 @@ error:
return err;
}
-static void xennet_destroy_queues(struct netfront_info *info)
-{
- unsigned int i;
-
- for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
- struct netfront_queue *queue = &info->queues[i];
-
- if (netif_running(info->netdev))
- napi_disable(&queue->napi);
- netif_napi_del(&queue->napi);
- }
-
- kfree(info->queues);
- info->queues = NULL;
-}
-
static int xennet_create_page_pool(struct netfront_queue *queue)
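
The setup_netfront() hunk above collapses a ladder of per-step error labels into a single fail label that tears down only what was actually set up, keyed off a GRANT_INVALID_REF sentinel. A small userspace sketch of that cleanup shape follows; every name in it (demo_ring, grant_page, INVALID_REF) is invented for illustration and does not model the grant-table API.

#include <stdio.h>
#include <stdlib.h>

#define INVALID_REF (-1)

struct demo_ring {
	void *page;
	int ref;			/* INVALID_REF until the page has been granted */
};

static int grant_page(struct demo_ring *r, int should_fail)
{
	if (should_fail)
		return -1;
	r->ref = 42;			/* pretend the backend now holds a grant on the page */
	return 0;
}

static int demo_setup(struct demo_ring *r, int fail_grant)
{
	r->ref = INVALID_REF;
	r->page = calloc(1, 4096);
	if (!r->page)
		return -1;
	if (grant_page(r, fail_grant))
		goto fail;
	return 0;

fail:
	/* one label handles every failure: revoke only if the grant exists */
	if (r->ref != INVALID_REF)
		r->ref = INVALID_REF;	/* stands in for revoking the grant */
	free(r->page);
	r->page = NULL;
	return -1;
}

int main(void)
{
	struct demo_ring r;

	printf("setup ok:   %d\n", demo_setup(&r, 0));
	free(r.page);			/* demo only; real teardown would revoke the grant first */
	printf("setup fail: %d\n", demo_setup(&r, 1));
	return 0;
}
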
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 5b833a9a83f8..a38e2fcdfd39 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -174,12 +174,11 @@ static int nfcmrvl_spi_probe(struct spi_device *spi)
return 0;
}
-static int nfcmrvl_spi_remove(struct spi_device *spi)
+static void nfcmrvl_spi_remove(struct spi_device *spi)
{
struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi);
nfcmrvl_nci_unregister_dev(drv_data->priv);
- return 0;
}
static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = {
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index d7db1a0e6be1..00d8ea6dcb5d 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1612,7 +1612,9 @@ free_nfc_dev:
nfc_digital_free_device(dev->nfc_digital_dev);
error:
+ usb_kill_urb(dev->in_urb);
usb_free_urb(dev->in_urb);
+ usb_kill_urb(dev->out_urb);
usb_free_urb(dev->out_urb);
usb_put_dev(dev->udev);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 4e723992e74c..169eacc0a32a 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -263,13 +263,11 @@ static int st_nci_spi_probe(struct spi_device *dev)
return r;
}
-static int st_nci_spi_remove(struct spi_device *dev)
+static void st_nci_spi_remove(struct spi_device *dev)
{
struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
ndlc_remove(phy->ndlc);
-
- return 0;
}
static struct spi_device_id st_nci_spi_id_table[] = {
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index b23f47936473..ed704bb77226 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1198,7 +1198,7 @@ err_disable_regulator:
return ret;
}
-static int st95hf_remove(struct spi_device *nfc_spi_dev)
+static void st95hf_remove(struct spi_device *nfc_spi_dev)
{
int result = 0;
unsigned char reset_cmd = ST95HF_COMMAND_RESET;
@@ -1236,8 +1236,6 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
/* disable regulator */
if (stcontext->st95hf_supply)
regulator_disable(stcontext->st95hf_supply);
-
- return 0;
}
/* Register as SPI protocol driver */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 29ca9c328df2..21d68664fe08 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2144,7 +2144,7 @@ err_destroy_lock:
return ret;
}
-static int trf7970a_remove(struct spi_device *spi)
+static void trf7970a_remove(struct spi_device *spi)
{
struct trf7970a *trf = spi_get_drvdata(spi);
@@ -2160,8 +2160,6 @@ static int trf7970a_remove(struct spi_device *spi)
regulator_disable(trf->regulator);
mutex_destroy(&trf->lock);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index fede05151f69..4081fc538ff4 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
return NTB_TOPO_NONE;
}
+static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & SPR_PPD_TOPO_MASK) {
+ case SPR_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+ case SPR_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
@@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
}
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
- ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ if (pdev_is_ICX(pdev))
+ ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ else if (pdev_is_SPR(pdev))
+ ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
index 3fcd3fdce9ed..f91323eaf5ce 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -49,10 +49,14 @@
#define GEN4_PPD_CLEAR_TRN 0x0001
#define GEN4_PPD_LINKTRN 0x0008
#define GEN4_PPD_CONN_MASK 0x0300
+#define SPR_PPD_CONN_MASK 0x0700
#define GEN4_PPD_CONN_B2B 0x0200
#define GEN4_PPD_DEV_MASK 0x1000
#define GEN4_PPD_DEV_DSD 0x1000
#define GEN4_PPD_DEV_USD 0x0000
+#define SPR_PPD_DEV_MASK 0x4000
+#define SPR_PPD_DEV_DSD 0x4000
+#define SPR_PPD_DEV_USD 0x0000
#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010
#define GEN4_SLOTSTS 0xb05a
@@ -62,6 +66,10 @@
#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
+#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK)
+#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD)
+#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD)
+
#define GEN4_DB_COUNT 32
#define GEN4_DB_LINK 32
#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK)
@@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_SPR(struct pci_dev *pdev)
+{
+ if (pdev_is_gen4(pdev) &&
+ pdev->revision > PCI_DEVICE_REVISION_ICX_MAX)
+ return 1;
+ return 0;
+}
+
#endif
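
For reference, the SPR topology decode added above boils down to masking the PPD register with the combined connection/device-type mask and matching the two B2B patterns. The standalone program below mirrors those defines with DEMO_* copies so it can be compiled outside the kernel; the numeric values come from the hunk, but the program itself is only an illustration.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PPD_CONN_MASK	0x0700
#define DEMO_PPD_CONN_B2B	0x0200
#define DEMO_PPD_DEV_MASK	0x4000
#define DEMO_PPD_DEV_DSD	0x4000
#define DEMO_PPD_DEV_USD	0x0000

#define DEMO_TOPO_MASK		(DEMO_PPD_CONN_MASK | DEMO_PPD_DEV_MASK)
#define DEMO_TOPO_B2B_USD	(DEMO_PPD_CONN_B2B | DEMO_PPD_DEV_USD)
#define DEMO_TOPO_B2B_DSD	(DEMO_PPD_CONN_B2B | DEMO_PPD_DEV_DSD)

static const char *decode_topo(uint32_t ppd)
{
	switch (ppd & DEMO_TOPO_MASK) {
	case DEMO_TOPO_B2B_USD:
		return "B2B USD";
	case DEMO_TOPO_B2B_DSD:
		return "B2B DSD";
	default:
		return "none";
	}
}

int main(void)
{
	printf("%s\n", decode_topo(0x0200));	/* B2B USD */
	printf("%s\n", decode_topo(0x4200));	/* B2B DSD */
	printf("%s\n", decode_topo(0x0100));	/* none */
	return 0;
}
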
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index dd683cb58d09..6295e55ef85e 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb,
{
phys_addr_t mw_phys_addr;
resource_size_t mw_size;
- size_t struct_size;
int peer_widx;
int peers;
int ret;
@@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb,
if (peers <= 0)
return -EINVAL;
- struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
-
- ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
+ GFP_KERNEL);
if (!ntb->msi)
return -ENOMEM;
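
The ntb/msi.c change swaps an open-coded sizeof(*ntb->msi) + sizeof(*peer_mws) * peers for the kernel's struct_size() helper, which computes the same header-plus-flexible-array size but saturates instead of silently overflowing. The userspace sketch below shows what both expressions evaluate to for a made-up struct; it does not reproduce struct_size()'s overflow checking, and the demo_* types are assumptions, not the NTB structures.

#include <stdio.h>
#include <stdlib.h>

struct demo_peer_mw {
	unsigned long phys_addr;
	unsigned long size;
};

struct demo_msi {
	int n_peers;
	struct demo_peer_mw peer_mws[];		/* flexible array member */
};

int main(void)
{
	int peers = 4;
	/* what both the old open-coded expression and struct_size() evaluate to */
	size_t sz = sizeof(struct demo_msi) + sizeof(struct demo_peer_mw) * peers;
	struct demo_msi *msi = calloc(1, sz);

	if (!msi)
		return 1;
	msi->n_peers = peers;
	printf("%zu bytes for the header plus %d peer windows\n", sz, peers);
	free(msi);
	return 0;
}
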
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 228c33b8d1d6..0a3873833594 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -6,7 +6,6 @@
#include <linux/blkdev.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
@@ -89,10 +88,9 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
*/
cur_len = min(len, bv.bv_len);
- iobuf = kmap_atomic(bv.bv_page);
- err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
- cur_len, rw);
- kunmap_atomic(iobuf);
+ iobuf = bvec_kmap_local(&bv);
+ err = ndbr->do_io(ndbr, dev_offset, iobuf, cur_len, rw);
+ kunmap_local(iobuf);
if (err)
return err;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index da3f007a1211..9613e54c7a67 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
@@ -1164,17 +1163,15 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
*/
cur_len = min(len, bv.bv_len);
- mem = kmap_atomic(bv.bv_page);
+ mem = bvec_kmap_local(&bv);
if (rw)
- ret = arena_write_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
+ ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
else
- ret = arena_read_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
+ ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
- kunmap_atomic(mem);
+ kunmap_local(mem);
if (ret)
return ret;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 8b52e5144f08..e5a58520d398 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -4,7 +4,6 @@
*/
#include <linux/blkdev.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 9dc7f3edd42b..5bbe31b08581 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -11,7 +11,6 @@
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
-#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 10351d5b49fa..c6a648fd8744 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
* parent bio. Otherwise directly call nd_region flush.
*/
if (bio && bio->bi_iter.bi_sector != -1) {
- struct bio *child = bio_alloc(GFP_ATOMIC, 0);
+ struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
+ GFP_ATOMIC);
if (!child)
return -ENOMEM;
- bio_copy_dev(child, bio);
- child->bi_opf = REQ_PREFLUSH;
+ bio_clone_blkg_association(child, bio);
child->bi_iter.bi_sector = -1;
bio_chain(child, bio);
submit_bio(child);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 58eda16f5c53..c31e184bfa45 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -5,7 +5,6 @@
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 59cfe13ea8a8..1f51a2361429 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -3,6 +3,7 @@
#define __NVDIMM_PMEM_H__
#include <linux/page-flags.h>
#include <linux/badblocks.h>
+#include <linux/memremap.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/fs.h>
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index dc0450ca23a3..d6d056963c06 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -24,6 +24,14 @@ config NVME_MULTIPATH
/dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
+config NVME_VERBOSE_ERRORS
+ bool "NVMe verbose error reporting"
+ depends on NVME_CORE
+ help
+ This option enables verbose reporting for NVMe errors. The
+ error translation table will grow the kernel image size by
+ about 4 KB.
+
config NVME_HWMON
bool "NVMe hardware monitoring"
depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index dfaacd472e5d..476c5c988496 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
-nvme-core-y := core.o ioctl.o
+nvme-core-y := core.o ioctl.o constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
new file mode 100644
index 000000000000..7d49eb34b348
--- /dev/null
+++ b/drivers/nvme/host/constants.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver verbose errors
+ * Copyright (c) 2022, Oracle and/or its affiliates
+ */
+
+#include <linux/blkdev.h>
+#include "nvme.h"
+
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+static const char * const nvme_ops[] = {
+ [nvme_cmd_flush] = "Flush",
+ [nvme_cmd_write] = "Write",
+ [nvme_cmd_read] = "Read",
+ [nvme_cmd_write_uncor] = "Write Uncorrectable",
+ [nvme_cmd_compare] = "Compare",
+ [nvme_cmd_write_zeroes] = "Write Zeros",
+ [nvme_cmd_dsm] = "Dataset Management",
+ [nvme_cmd_verify] = "Verify",
+ [nvme_cmd_resv_register] = "Reservation Register",
+ [nvme_cmd_resv_report] = "Reservation Report",
+ [nvme_cmd_resv_acquire] = "Reservation Acquire",
+ [nvme_cmd_resv_release] = "Reservation Release",
+ [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
+ [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
+ [nvme_cmd_zone_append] = "Zone Management Append",
+};
+
+static const char * const nvme_admin_ops[] = {
+ [nvme_admin_delete_sq] = "Delete SQ",
+ [nvme_admin_create_sq] = "Create SQ",
+ [nvme_admin_get_log_page] = "Get Log Page",
+ [nvme_admin_delete_cq] = "Delete CQ",
+ [nvme_admin_create_cq] = "Create CQ",
+ [nvme_admin_identify] = "Identify",
+ [nvme_admin_abort_cmd] = "Abort Command",
+ [nvme_admin_set_features] = "Set Features",
+ [nvme_admin_get_features] = "Get Features",
+ [nvme_admin_async_event] = "Async Event",
+ [nvme_admin_ns_mgmt] = "Namespace Management",
+ [nvme_admin_activate_fw] = "Activate Firmware",
+ [nvme_admin_download_fw] = "Download Firmware",
+ [nvme_admin_dev_self_test] = "Device Self Test",
+ [nvme_admin_ns_attach] = "Namespace Attach",
+ [nvme_admin_keep_alive] = "Keep Alive",
+ [nvme_admin_directive_send] = "Directive Send",
+ [nvme_admin_directive_recv] = "Directive Receive",
+ [nvme_admin_virtual_mgmt] = "Virtual Management",
+ [nvme_admin_nvme_mi_send] = "NVMe Send MI",
+ [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+ [nvme_admin_dbbuf] = "Doorbell Buffer Config",
+ [nvme_admin_format_nvm] = "Format NVM",
+ [nvme_admin_security_send] = "Security Send",
+ [nvme_admin_security_recv] = "Security Receive",
+ [nvme_admin_sanitize_nvm] = "Sanitize NVM",
+ [nvme_admin_get_lba_status] = "Get LBA Status",
+};
+
+static const char * const nvme_statuses[] = {
+ [NVME_SC_SUCCESS] = "Success",
+ [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
+ [NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
+ [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
+ [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
+ [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
+ [NVME_SC_INTERNAL] = "Internal Error",
+ [NVME_SC_ABORT_REQ] = "Command Abort Requested",
+ [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
+ [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
+ [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
+ [NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
+ [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
+ [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
+ [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
+ [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
+ [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
+ [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
+ [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
+ [NVME_SC_OP_DENIED] = "Operation Denied",
+ [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
+ [NVME_SC_RESERVED] = "Reserved",
+ [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
+ [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
+ [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
+ [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
+ [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
+ [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
+ [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
+ [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
+ [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
+ [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
+ [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
+ [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
+ [NVME_SC_LBA_RANGE] = "LBA Out of Range",
+ [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
+ [NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
+ [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
+ [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
+ [NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
+ [NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
+ [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
+ [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
+ [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
+ [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
+ [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
+ [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
+ [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
+ [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
+ [NVME_SC_INVALID_FORMAT] = "Invalid Format",
+ [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
+ [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
+ [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
+ [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
+ [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
+ [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
+ [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
+ [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
+ [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
+ [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
+ [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
+ [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
+ [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
+ [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
+ [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
+ [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
+ [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
+ [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
+ [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
+ [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
+ [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
+ [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
+ [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
+ [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
+ [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
+ [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
+ [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
+ [NVME_SC_INVALID_PI] = "Invalid Protection Information",
+ [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
+ [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
+ [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
+ [NVME_SC_ZONE_FULL] = "Zone Is Full",
+ [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
+ [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
+ [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
+ [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
+ [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
+ [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
+ [NVME_SC_WRITE_FAULT] = "Write Fault",
+ [NVME_SC_READ_ERROR] = "Unrecovered Read Error",
+ [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
+ [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
+ [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
+ [NVME_SC_COMPARE_FAILED] = "Compare Failure",
+ [NVME_SC_ACCESS_DENIED] = "Access Denied",
+ [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+ [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
+ [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
+ [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+ [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+};
+
+const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ status &= 0x7ff;
+ if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
+ return nvme_statuses[status];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
+ return nvme_ops[opcode];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
+ return nvme_admin_ops[opcode];
+ return "Unknown";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
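Note: the tables above rely on C designated initializers, so any opcode or status code without an entry is left NULL, and the accessor functions bounds-check before dereferencing and fall back to "Unknown". A standalone sketch of that lookup pattern, with illustrative names only, is:

/* Standalone userspace sketch (not kernel code) of the sparse-table lookup
 * pattern used above: gaps stay NULL, the accessor bounds-checks first. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const demo_statuses[] = {
	[0x00] = "Success",
	[0x02] = "Invalid Field in Command",
	[0x80] = "LBA Out of Range",
};

static const char *demo_status_str(unsigned int status)
{
	if (status < ARRAY_SIZE(demo_statuses) && demo_statuses[status])
		return demo_statuses[status];
	return "Unknown";
}

int main(void)
{
	printf("%s\n", demo_status_str(0x02));	/* Invalid Field in Command */
	printf("%s\n", demo_status_str(0x01));	/* Unknown (gap in the table) */
	return 0;
}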
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5e0bfda04bd7..cd6eac8e3dd6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -299,6 +299,37 @@ static void nvme_retry_req(struct request *req)
blk_mq_delay_kick_requeue_list(req->q, delay);
}
+static void nvme_log_error(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ if (ns) {
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ nvme_get_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
+ (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+ return;
+ }
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
+ dev_name(nr->ctrl->device),
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+}
+
enum nvme_disposition {
COMPLETE,
RETRY,
@@ -339,6 +370,8 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+ nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
@@ -368,6 +401,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_complete_batch_req(struct request *req)
{
+ trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
nvme_end_req_zoned(req);
}
@@ -561,7 +595,7 @@ static void nvme_free_ns_head(struct kref *ref)
container_of(ref, struct nvme_ns_head, ref);
nvme_mpath_remove_disk(head);
- ida_simple_remove(&head->subsys->ns_ida, head->instance);
+ ida_free(&head->subsys->ns_ida, head->instance);
cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
@@ -606,13 +640,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
req->rq_flags |= RQF_DONTPREP;
}
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
-{
- return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-}
-
-static inline void nvme_init_request(struct request *req,
- struct nvme_command *cmd)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
if (req->q->queuedata)
req->timeout = NVME_IO_TIMEOUT;
@@ -628,30 +657,7 @@ static inline void nvme_init_request(struct request *req,
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
-
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags)
-{
- struct request *req;
-
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
- if (!IS_ERR(req))
- nvme_init_request(req, cmd);
- return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
-{
- struct request *req;
-
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
- if (!IS_ERR(req))
- nvme_init_request(req, cmd);
- return req;
-}
+EXPORT_SYMBOL_GPL(nvme_init_request);
/*
* For something we're not in a state to send to the device the default action
@@ -757,6 +763,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
struct streams_directive_params s;
+ u16 nssa;
int ret;
if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
@@ -772,14 +779,16 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl)
if (ret)
goto out_disable_stream;
- ctrl->nssa = le16_to_cpu(s.nssa);
- if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
+ nssa = le16_to_cpu(s.nssa);
+ if (nssa < BLK_MAX_WRITE_HINTS - 1) {
dev_info(ctrl->device, "too few streams (%u) available\n",
- ctrl->nssa);
+ nssa);
+ /* this condition is not an error: streams are optional */
+ ret = 0;
goto out_disable_stream;
}
- ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
+ ctrl->nr_streams = min_t(u16, nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0;
@@ -1049,8 +1058,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* >0: nvme controller's cqe status response
* <0: kernel error in lieu of controller response
*/
-static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
- bool at_head)
+static int nvme_execute_rq(struct request *rq, bool at_head)
{
blk_status_t status;
@@ -1075,11 +1083,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int ret;
if (qid == NVME_QID_ANY)
- req = nvme_alloc_request(q, cmd, flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
- req = nvme_alloc_request_qid(q, cmd, flags, qid);
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid ? qid - 1 : 0);
+
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, cmd);
if (timeout)
req->timeout = timeout;
@@ -1090,7 +1101,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(NULL, req, at_head);
+ ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1206,12 +1217,11 @@ int nvme_execute_passthru_rq(struct request *rq)
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
u32 effects;
int ret;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- ret = nvme_execute_rq(disk, rq, false);
+ ret = nvme_execute_rq(rq, false);
if (effects) /* nothing to be done for zero cmd effects */
nvme_passthru_end(ctrl, effects, cmd, ret);
@@ -1270,14 +1280,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
return;
}
- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (IS_ERR(rq)) {
/* allocation failure, reset the controller */
dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
nvme_reset_ctrl(ctrl);
return;
}
+ nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
@@ -1682,13 +1693,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
-static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
-{
- return !uuid_is_null(&ids->uuid) ||
- memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
- memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
-}
-
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
@@ -1722,7 +1726,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1738,7 +1742,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return 0;
+ return;
+
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
* The NVMe over Fabrics specification only supports metadata as
@@ -1746,7 +1751,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return -EINVAL;
+ return;
ns->features |= NVME_NS_EXT_LBAS;
@@ -1773,8 +1778,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else
ns->features |= NVME_NS_METADATA_SUPPORTED;
}
-
- return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -1915,9 +1918,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
+ nvme_configure_metadata(ns, id);
nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id);
@@ -1933,7 +1934,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- goto out;
+ return ret;
}
if (nvme_ns_head_multipath(ns->head)) {
@@ -1948,16 +1949,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
return 0;
out_unfreeze:
- blk_mq_unfreeze_queue(ns->disk->queue);
-out:
/*
* If probing fails due to an unsupported feature, hide the block device,
* but still allow other access.
*/
if (ret == -ENODEV) {
ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
+ blk_mq_unfreeze_queue(ns->disk->queue);
return ret;
}
@@ -1979,7 +1980,7 @@ static char nvme_pr_type(enum pr_type type)
default:
return 0;
}
-};
+}
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
struct nvme_command *c, u8 data[16])
@@ -2567,7 +2568,7 @@ static void nvme_release_subsystem(struct device *dev)
container_of(dev, struct nvme_subsystem, dev);
if (subsys->instance >= 0)
- ida_simple_remove(&nvme_instance_ida, subsys->instance);
+ ida_free(&nvme_instance_ida, subsys->instance);
kfree(subsys);
}
@@ -2992,6 +2993,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_namespaces = le32_to_cpu(id->mnan);
ctrl->ctratt = le32_to_cpu(id->ctratt);
+ ctrl->cntrltype = id->cntrltype;
+ ctrl->dctype = id->dctype;
+
if (id->rtd3e) {
/* us -> s */
u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
@@ -3525,6 +3529,40 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
+static ssize_t cntrltype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_CTRL_IO] = "io\n",
+ [NVME_CTRL_DISC] = "discovery\n",
+ [NVME_CTRL_ADMIN] = "admin\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->cntrltype]);
+}
+static DEVICE_ATTR_RO(cntrltype);
+
+static ssize_t dctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_DCTYPE_NOT_REPORTED] = "none\n",
+ [NVME_DCTYPE_DDC] = "ddc\n",
+ [NVME_DCTYPE_CDC] = "cdc\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->dctype]);
+}
+static DEVICE_ATTR_RO(dctype);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3546,6 +3584,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reconnect_delay.attr,
&dev_attr_fast_io_fail_tmo.attr,
&dev_attr_kato.attr,
+ &dev_attr_cntrltype.attr,
+ &dev_attr_dctype.attr,
NULL
};
@@ -3600,16 +3640,24 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
return NULL;
}
-static int __nvme_check_ids(struct nvme_subsystem *subsys,
- struct nvme_ns_head *new)
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_ids *ids)
{
+ bool has_uuid = !uuid_is_null(&ids->uuid);
+ bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
+ bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (nvme_ns_ids_valid(&new->ids) &&
- nvme_ns_ids_equal(&new->ids, &h->ids))
+ if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
+ return -EINVAL;
+ if (has_nguid &&
+ memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
+ return -EINVAL;
+ if (has_eui64 &&
+ memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
return -EINVAL;
}
@@ -3618,7 +3666,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
static void nvme_cdev_rel(struct device *dev)
{
- ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+ ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
@@ -3632,7 +3680,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
{
int minor, ret;
- minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
+ minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
if (minor < 0)
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
@@ -3695,7 +3743,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head = kzalloc(size, GFP_KERNEL);
if (!head)
goto out;
- ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
+ ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
if (ret < 0)
goto out_free_head;
head->instance = ret;
@@ -3708,13 +3756,6 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ids = *ids;
kref_init(&head->ref);
- ret = __nvme_check_ids(ctrl->subsys, head);
- if (ret) {
- dev_err(ctrl->device,
- "duplicate IDs for nsid %d\n", nsid);
- goto out_cleanup_srcu;
- }
-
if (head->ids.csi) {
ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
if (ret)
@@ -3734,7 +3775,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
out_ida_remove:
- ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
+ ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
out:
@@ -3743,16 +3784,56 @@ out:
return ERR_PTR(ret);
}
+static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
+ struct nvme_ns_ids *ids)
+{
+ struct nvme_subsystem *s;
+ int ret = 0;
+
+ /*
+ * Note that this check is racy as we try to avoid holding the global
+ * lock over the whole ns_head creation. But it is only intended as
+ * a sanity check anyway.
+ */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(s, &nvme_subsystems, entry) {
+ if (s == this)
+ continue;
+ mutex_lock(&s->lock);
+ ret = nvme_subsys_check_duplicate_ids(s, ids);
+ mutex_unlock(&s->lock);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&nvme_subsystems_lock);
+
+ return ret;
+}
+
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_ns_ids *ids, bool is_shared)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
- int ret = 0;
+ int ret;
+
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "globally duplicate IDs for nsid %d\n", nsid);
+ return ret;
+ }
mutex_lock(&ctrl->subsys->lock);
head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs in subsystem for nsid %d\n",
+ nsid);
+ goto out_unlock;
+ }
head = nvme_alloc_ns_head(ctrl, nsid, ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
@@ -3772,6 +3853,14 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
nsid);
goto out_put_ns_head;
}
+
+ if (!multipath && !list_empty(&head->list)) {
+ dev_warn(ctrl->device,
+ "Found shared namespace %d, but multipathing not supported.\n",
+ nsid);
+ dev_warn_once(ctrl->device,
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ }
}
list_add_tail_rcu(&ns->siblings, &head->list);
@@ -3860,13 +3949,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
goto out_cleanup_disk;
/*
- * Without the multipath code enabled, multiple controller per
- * subsystems are visible as devices and thus we cannot use the
- * subsystem instance.
+ * If multipathing is enabled, the device name for all disks and not
+ * just those that represent shared namespaces needs to be based on the
+ * subsystem instance. Using the controller instance for private
+ * namespaces could lead to naming collisions between shared and private
+ * namespaces if they don't use a common numbering scheme.
+ *
+ * If multipathing is not enabled, disk names must use the controller
+ * instance as shared namespaces will show up as multiple block
+ * devices.
*/
- if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
+ if (ns->head->disk) {
+ sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->instance, ns->head->instance);
+ disk->flags |= GENHD_FL_HIDDEN;
+ } else if (multipath) {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ } else {
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
ns->head->instance);
+ }
if (nvme_update_ns_info(ns, id))
goto out_unlink_ns;
@@ -4231,6 +4334,13 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
+static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
+{
+ char *envp[2] = { envdata, NULL };
+
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+}
+
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
char *envp[2] = { NULL, NULL };
@@ -4253,7 +4363,14 @@ static void nvme_async_event_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -4398,6 +4515,8 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
+
+ nvme_change_uevent(ctrl, "NVME_EVENT=connected");
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -4431,7 +4550,7 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_subsystem *subsys = ctrl->subsys;
if (!subsys || ctrl->instance != subsys->instance)
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ ida_free(&nvme_instance_ida, ctrl->instance);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
@@ -4490,7 +4609,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out;
}
- ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
if (ret < 0)
goto out;
ctrl->instance = ret;
@@ -4531,7 +4650,7 @@ out_free_name:
nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name);
out_release_instance:
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ ida_free(&nvme_instance_ida, ctrl->instance);
out:
if (ctrl->discard_page)
__free_page(ctrl->discard_page);
@@ -4566,7 +4685,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- blk_set_queue_dying(ns->queue);
+ blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns);
set_capacity_and_notify(ns->disk, 0);
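Note on the log format used by nvme_log_error() above: the status word packs the Status Code in bits 7:0, the Status Code Type in bits 10:8, and the MORE/DNR flags in the high bits. A standalone decoding sketch follows; the flag mask values (0x2000/0x4000) mirror what the driver appears to use and should be treated as illustrative assumptions rather than authoritative definitions.

/* Userspace sketch of the status decoding done by nvme_log_error() above. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SC_MORE	0x2000	/* assumed value of NVME_SC_MORE */
#define DEMO_SC_DNR	0x4000	/* assumed value of NVME_SC_DNR */

static void demo_decode_status(uint16_t status)
{
	printf("sct 0x%x / sc 0x%x %s%s\n",
	       (status >> 8) & 0x7,			/* Status Code Type */
	       status & 0xff,				/* Status Code */
	       (status & DEMO_SC_MORE) ? "MORE " : "",
	       (status & DEMO_SC_DNR) ? "DNR " : "");
}

int main(void)
{
	demo_decode_status(0x4002);	/* DNR set, Invalid Field in Command */
	return 0;
}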
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7ae041e2b3fb..ee79a6d639b4 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -144,11 +144,10 @@ EXPORT_SYMBOL_GPL(nvmf_get_address);
*/
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
@@ -272,7 +271,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) {
- case (NVME_SC_CONNECT_INVALID_PARAM):
+ case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
@@ -873,7 +872,7 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts)
{
if ((opts->mask & required_opts) != required_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & required_opts) &&
@@ -923,7 +922,7 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
if (opts->mask & ~allowed_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & opts->mask) &&
@@ -1092,7 +1091,6 @@ static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
struct nvme_ctrl *ctrl;
- int ret = 0;
mutex_lock(&nvmf_dev_mutex);
ctrl = seq_file->private;
@@ -1106,7 +1104,7 @@ static int nvmf_dev_show(struct seq_file *seq_file, void *private)
out_unlock:
mutex_unlock(&nvmf_dev_mutex);
- return ret;
+ return 0;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c3203ff1c654..1e3a09cad961 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 71b3108c22f0..080f85f4105f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -259,7 +259,7 @@ nvme_fc_free_lport(struct kref *ref)
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
put_device(lport->dev);
@@ -399,7 +399,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed;
}
- idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -439,7 +439,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
return 0;
out_ida_put:
- ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+ ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_reghost_failed:
@@ -535,7 +535,7 @@ nvme_fc_free_rport(struct kref *ref)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
WARN_ON(!list_empty(&rport->disc_list));
- ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+ ida_free(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
@@ -713,7 +713,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
goto out_lport_put;
}
- idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_kfree_rport;
@@ -2393,7 +2393,7 @@ nvme_fc_ctrl_free(struct kref *ref)
put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
@@ -2916,11 +2916,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.tagset = &ctrl->tag_set;
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tag_set;
- }
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
@@ -3472,7 +3470,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_free_ctrl;
@@ -3635,7 +3633,7 @@ out_free_queues:
kfree(ctrl->queues);
out_free_ida:
put_device(ctrl->dev);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
kfree(ctrl);
out_fail:
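Note: the ida_simple_* calls replaced throughout core.c and fc.c map directly onto the newer API: ida_simple_get(ida, 0, 0, gfp) becomes ida_alloc(ida, gfp), ida_simple_get(ida, 1, 0, gfp) becomes ida_alloc_min(ida, 1, gfp), and ida_simple_remove() becomes ida_free(). A minimal kernel-context sketch (illustrative names, not part of this patch):

/* Kernel-context sketch of the ida_simple_* to ida_* conversion. */
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_instance_ida);

static int demo_get_instance(void)
{
	/* returns a free ID >= 0, or a negative errno on failure */
	return ida_alloc(&demo_instance_ida, GFP_KERNEL);
}

static void demo_put_instance(int instance)
{
	ida_free(&demo_instance_ida, instance);
}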
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 22314962842d..554566371ffa 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -56,7 +56,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u64 *result, unsigned timeout)
+ u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -66,17 +66,32 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, cmd);
if (timeout)
req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+ if (!vec)
+ ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL);
+ else {
+ struct iovec fast_iov[UIO_FASTIOV];
+ struct iovec *iov = fast_iov;
+ struct iov_iter iter;
+
+ ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
+ goto out;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter,
+ GFP_KERNEL);
+ kfree(iov);
+ }
if (ret)
goto out;
bio = req->bio;
@@ -170,7 +185,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length,
- metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
+ metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
+ false);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -224,7 +240,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
- 0, &result, timeout);
+ 0, &result, timeout, false);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -235,7 +251,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- struct nvme_passthru_cmd64 __user *ucmd)
+ struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
@@ -270,7 +286,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
- 0, &cmd.result, timeout);
+ 0, &cmd.result, timeout, vec);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -296,7 +312,7 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp);
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
@@ -340,7 +356,9 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD:
- return nvme_user_cmd64(ns->ctrl, ns, argp);
+ return nvme_user_cmd64(ns->ctrl, ns, argp, false);
+ case NVME_IOCTL_IO64_CMD_VEC:
+ return nvme_user_cmd64(ns->ctrl, ns, argp, true);
default:
return -ENOTTY;
}
@@ -480,7 +498,7 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp);
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
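Note on the new NVME_IOCTL_IO64_CMD_VEC path above: since the handler feeds the user pointer to import_iovec() with cmd.data_len as the segment count, addr is read here as the iovec array and data_len as the number of entries. A hypothetical userspace sketch follows; that field usage, and the availability of NVME_IOCTL_IO64_CMD_VEC in the installed uapi headers, are assumptions inferred from the code above.

/* Hypothetical userspace use of the vectored passthrough ioctl. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/nvme_ioctl.h>

static int demo_vectored_read(int fd, unsigned int nsid, void *buf_a,
			      void *buf_b, size_t blk_size,
			      unsigned long long slba)
{
	struct iovec iov[2] = {
		{ .iov_base = buf_a, .iov_len = blk_size },
		{ .iov_base = buf_b, .iov_len = blk_size },
	};
	struct nvme_passthru_cmd64 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x02;			/* NVM Read */
	cmd.nsid = nsid;
	cmd.addr = (uintptr_t)iov;		/* iovec array, per the handler above */
	cmd.data_len = 2;			/* number of iovec entries (assumed) */
	cmd.cdw10 = (uint32_t)slba;		/* SLBA, lower 32 bits */
	cmd.cdw11 = (uint32_t)(slba >> 32);	/* SLBA, upper 32 bits */
	cmd.cdw12 = 1;				/* NLB, 0's based: two blocks */

	return ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd);
}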
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f8bf6606eb2f..1b31f19e1053 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -5,10 +5,11 @@
#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"
-static bool multipath = true;
+bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
@@ -79,28 +80,6 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
blk_freeze_queue_start(h->disk->queue);
}
-/*
- * If multipathing is enabled we need to always use the subsystem instance
- * number for numbering our devices to avoid conflicts between subsystems that
- * have multiple controllers and thus use the multipath-aware subsystem node
- * and those that have a single controller and use the controller node
- * directly.
- */
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
-{
- if (!multipath)
- return false;
- if (!ns->head->disk) {
- sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
- ns->head->instance);
- return true;
- }
- sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
- ns->ctrl->instance, ns->head->instance);
- *flags = GENHD_FL_HIDDEN;
- return true;
-}
-
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -386,8 +365,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
} else {
dev_warn_ratelimited(dev, "no available path - failing I/O\n");
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
}
srcu_read_unlock(&head->srcu, srcu_idx);
@@ -848,7 +826,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- blk_set_queue_dying(head->disk->queue);
+ blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
@@ -898,7 +876,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
if (ana_log_size > ctrl->ana_log_size) {
nvme_mpath_stop(ctrl);
nvme_mpath_uninit(ctrl);
- ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+ ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf)
return -ENOMEM;
}
@@ -915,7 +893,7 @@ out_uninit:
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
- kfree(ctrl->ana_log_buf);
+ kvfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
ctrl->ana_log_size = 0;
}
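Note: the ANA log buffer switch from kmalloc()/kfree() to kvmalloc()/kvfree() above accommodates large logs. A kernel-context sketch of the pairing (illustrative names, not part of this patch):

/* kvmalloc() falls back to vmalloc() when the request is too large or too
 * fragmented for kmalloc(); kvfree() releases either kind. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_alloc_ana_log(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);	/* kmalloc- or vmalloc-backed */
}

static void demo_free_ana_log(void *buf)
{
	kvfree(buf);				/* safe for both, and for NULL */
}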
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a162f6c6da6e..1ea908d43e17 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -280,7 +280,6 @@ struct nvme_ctrl {
u16 crdt[3];
u16 oncs;
u16 oacs;
- u16 nssa;
u16 nr_streams;
u16 sqsize;
u32 max_namespaces;
@@ -349,6 +348,9 @@ struct nvme_ctrl {
unsigned long discard_page_busy;
struct nvme_fault_inject fault_inject;
+
+ enum nvme_ctrl_type cntrltype;
+ enum nvme_dctype dctype;
};
enum nvme_iopolicy {
@@ -696,9 +698,13 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+{
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
#define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags);
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -768,7 +774,6 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
@@ -791,20 +796,17 @@ static inline void nvme_trace_bio_complete(struct request *req)
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
+extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
#else
+#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
return false;
}
-static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
- int *flags)
-{
- return false;
-}
static inline void nvme_failover_req(struct request *req)
{
}
@@ -894,6 +896,14 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
}
#endif
+static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
+{
+ ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+ if (IS_ERR(ctrl->connect_q))
+ return PTR_ERR(ctrl->connect_q);
+ return 0;
+}
+
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
@@ -930,4 +940,23 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const unsigned char *nvme_get_error_status_str(u16 status);
+const unsigned char *nvme_get_opcode_str(u8 opcode);
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d8585df2c2fd..2e98ac3f3ad6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -424,8 +425,9 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1428,12 +1430,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, aborting\n",
req->tag, nvmeq->qid);
- abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
+ nvme_init_request(abort_req, &cmd);
abort_req->end_io_data = NULL;
blk_execute_rq_nowait(abort_req, false, abort_endio);
@@ -1722,7 +1725,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.timeout = nvme_timeout,
};
@@ -1732,7 +1735,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
@@ -2475,9 +2478,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, &cmd);
req->end_io_data = nvmeq;
@@ -3391,7 +3395,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
- NVME_QUIRK_DEALLOCATE_ZEROES, },
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 850f84d204d0..d9f19d901313 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -978,11 +978,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tag_set;
- }
}
ret = nvme_rdma_start_io_queues(ctrl);
@@ -1200,6 +1198,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
@@ -1282,6 +1281,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL);
}
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
@@ -1293,13 +1308,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return;
- if (blk_integrity_rq(rq)) {
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
- }
-
if (req->use_sig_mr)
pool = &queue->qp->sig_mrs;
@@ -1308,9 +1316,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ nvme_rdma_dma_unmap_req(ibdev, rq);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1521,22 +1527,11 @@ mr_put:
return -EINVAL;
}
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, struct nvme_command *c)
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+ int *count, int *pi_count)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
- int pi_count = 0;
- int count, ret;
-
- req->num_sge = 1;
- refcount_set(&req->ref, 2); /* send and recv completions */
-
- c->common.flags |= NVME_CMD_SGL_METABUF;
-
- if (!blk_rq_nr_phys_segments(rq))
- return nvme_rdma_set_sg_null(c);
+ int ret;
req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@@ -1548,9 +1543,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
req->data_sgl.sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
- req->data_sgl.nents, rq_dma_dir(rq));
- if (unlikely(count <= 0)) {
+ *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
+ if (unlikely(*count <= 0)) {
ret = -EIO;
goto out_free_table;
}
@@ -1569,16 +1564,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
rq->bio, req->metadata_sgl->sg_table.sgl);
- pi_count = ib_dma_map_sg(ibdev,
- req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents,
- rq_dma_dir(rq));
- if (unlikely(pi_count <= 0)) {
+ *pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(*pi_count <= 0)) {
ret = -EIO;
goto out_free_pi_table;
}
}
+ return 0;
+
+out_free_pi_table:
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+out_free_table:
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ return ret;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
+ int count, ret;
+
+ req->num_sge = 1;
+ refcount_set(&req->ref, 2); /* send and recv completions */
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+ if (unlikely(ret))
+ return ret;
+
if (req->use_sig_mr) {
ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
goto out;
@@ -1602,23 +1631,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_pi_sg;
+ goto out_dma_unmap_req;
return 0;
-out_unmap_pi_sg:
- if (blk_integrity_rq(rq))
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
-out_free_pi_table:
- if (blk_integrity_rq(rq))
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
-out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
-out_free_table:
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+ nvme_rdma_dma_unmap_req(ibdev, rq);
return ret;
}
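Note: the refactor above factors the DMA unmap and table-free steps into one helper so the mapping error path and the completion path release resources identically and in reverse setup order. A standalone sketch of that general pattern, in plain userspace C with illustrative names:

/* Error-unwind sketch: a single teardown helper shared by error and
 * completion paths, mirroring nvme_rdma_dma_unmap_req()'s role above. */
#include <stdlib.h>

struct demo_req {
	void *data_tbl;
	void *meta_tbl;		/* stays NULL when there is no metadata */
};

static void demo_unmap_req(struct demo_req *req)
{
	free(req->meta_tbl);	/* free(NULL) is a no-op */
	req->meta_tbl = NULL;
	free(req->data_tbl);
	req->data_tbl = NULL;
}

static int demo_map_req(struct demo_req *req, int with_meta)
{
	req->data_tbl = malloc(64);
	if (!req->data_tbl)
		return -1;

	if (with_meta) {
		req->meta_tbl = malloc(64);
		if (!req->meta_tbl)
			goto out_unmap;		/* unwind what was set up */
	}
	return 0;

out_unmap:
	demo_unmap_req(req);
	return -1;
}

int main(void)
{
	struct demo_req req = { 0 };

	if (!demo_map_req(&req, 1))
		demo_unmap_req(&req);
	return 0;
}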
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ceb28675fdf..ad3a2bf2f1e9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -30,6 +30,44 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+ &nvme_tcp_slock_key[0],
+ "sk_lock-AF_INET-NVME",
+ &nvme_tcp_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+ &nvme_tcp_slock_key[1],
+ "sk_lock-AF_INET6-NVME",
+ &nvme_tcp_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -44,6 +82,8 @@ struct nvme_tcp_request {
u32 data_len;
u32 pdu_len;
u32 pdu_sent;
+ u32 h2cdata_left;
+ u32 h2cdata_offset;
u16 ttag;
__le16 status;
struct list_head entry;
@@ -95,6 +135,7 @@ struct nvme_tcp_queue {
struct nvme_tcp_request *request;
int queue_size;
+ u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
unsigned long flags;
@@ -572,23 +613,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
- struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_data_pdu *data = req->pdu;
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
+ u32 h2cdata_sent = req->pdu_len;
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
req->pdu_sent = 0;
+ req->h2cdata_left -= req->pdu_len;
+ req->h2cdata_offset += h2cdata_sent;
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
- data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (!req->h2cdata_left)
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
if (queue->hdr_digest)
data->hdr.flags |= NVME_TCP_F_HDGST;
if (queue->data_digest)
@@ -597,9 +641,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->hdr.pdo = data->hdr.hlen + hdgst;
data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
- data->ttag = pdu->ttag;
+ data->ttag = req->ttag;
data->command_id = nvme_cid(rq);
- data->data_offset = pdu->r2t_offset;
+ data->data_offset = cpu_to_le32(req->h2cdata_offset);
data->data_length = cpu_to_le32(req->pdu_len);
}
@@ -609,6 +653,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
struct nvme_tcp_request *req;
struct request *rq;
u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+ u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -633,14 +678,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return -EPROTO;
}
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ if (unlikely(r2t_offset < req->data_sent)) {
dev_err(queue->ctrl->ctrl.device,
"req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+ rq->tag, r2t_offset, req->data_sent);
return -EPROTO;
}
- nvme_tcp_setup_h2c_data_pdu(req, pdu);
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+ req->ttag = pdu->ttag;
+
+ nvme_tcp_setup_h2c_data_pdu(req);
nvme_tcp_queue_request(req, false, true);
return 0;
@@ -913,13 +963,22 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
int req_data_len = req->data_len;
+ u32 h2cdata_left = req->h2cdata_left;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
@@ -964,7 +1023,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DDGST;
req->offset = 0;
} else {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
}
return 1;
}
@@ -1022,9 +1084,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ if (!req->h2cdata_left)
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ else
+ ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE);
if (unlikely(ret <= 0))
return ret;
@@ -1044,6 +1111,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
size_t offset = req->offset;
+ u32 h2cdata_left = req->h2cdata_left;
int ret;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
@@ -1061,7 +1129,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
return ret;
if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1253,6 +1324,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
struct msghdr msg = {};
struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst;
+ u32 maxh2cdata;
int ret;
icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1336,6 +1408,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
+ maxh2cdata = le32_to_cpu(icresp->maxdata);
+ if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+ pr_err("queue %d: invalid maxh2cdata returned %u\n",
+ nvme_tcp_queue_id(queue), maxh2cdata);
+ goto free_icresp;
+ }
+ queue->maxh2cdata = maxh2cdata;
+
ret = 0;
free_icresp:
kfree(icresp);
@@ -1427,6 +1507,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
goto err_destroy_mutex;
}
+ nvme_tcp_reclassify_socket(queue->sock);
+
/* Single syn retry */
tcp_sock_set_syncnt(queue->sock->sk, 1);
@@ -1674,7 +1756,7 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
- int i, ret = 0;
+ int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_start_queue(ctrl, i);
@@ -1714,8 +1796,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i,
- ctrl->sqsize + 1);
+ ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
if (ret)
goto out_free_queues;
}
@@ -1825,11 +1906,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q)) {
- ret = PTR_ERR(ctrl->connect_q);
+ ret = nvme_ctrl_init_connect_q(ctrl);
+ if (ret)
goto out_free_tag_set;
- }
}
ret = nvme_tcp_start_io_queues(ctrl);
@@ -2096,6 +2175,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
@@ -2320,6 +2400,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
+ req->h2cdata_left = 0;
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
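Note on the H2CData changes above: an R2T of r2t_length bytes at r2t_offset is now answered with a sequence of H2CData PDUs of at most maxh2cdata bytes each, and only the final PDU carries the LAST flag. A standalone sketch of the chunking logic, mirroring nvme_tcp_setup_h2c_data_pdu() with illustrative names:

/* Userspace sketch of MAXH2CDATA-bounded H2CData chunking. */
#include <stdio.h>

static void demo_send_h2c(unsigned int r2t_offset, unsigned int r2t_length,
			  unsigned int maxh2cdata)
{
	unsigned int left = r2t_length;
	unsigned int offset = r2t_offset;

	while (left) {
		unsigned int len = left < maxh2cdata ? left : maxh2cdata;

		left -= len;
		printf("H2CData: offset=%u len=%u%s\n", offset, len,
		       left ? "" : " LAST");
		offset += len;
	}
}

int main(void)
{
	/* three PDUs: 131072, 131072, then 37856 bytes flagged LAST */
	demo_send_h2c(0, 300000, 131072);
	return 0;
}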
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fb24746de06..46d0dab686dd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -511,7 +511,11 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
goto done;
}
- nvmet_ns_revalidate(req->ns);
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 091a0ca16361..8fedd1e052fe 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -60,10 +60,11 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
if (nvmet_addr_family[i].type == adrfam)
- return sprintf(page, "%s\n", nvmet_addr_family[i].name);
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_family[i].name);
}
- return sprintf(page, "\n");
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
@@ -93,10 +94,9 @@ CONFIGFS_ATTR(nvmet_, addr_adrfam);
static ssize_t nvmet_addr_portid_show(struct config_item *item,
char *page)
{
- struct nvmet_port *port = to_nvmet_port(item);
+ __le16 portid = to_nvmet_port(item)->disc_addr.portid;
- return snprintf(page, PAGE_SIZE, "%d\n",
- le16_to_cpu(port->disc_addr.portid));
+ return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}
static ssize_t nvmet_addr_portid_store(struct config_item *item,
@@ -124,8 +124,7 @@ static ssize_t nvmet_addr_traddr_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.traddr);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
@@ -162,10 +161,11 @@ static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
if (treq == nvmet_addr_treq[i].type)
- return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_treq[i].name);
}
- return sprintf(page, "\n");
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -199,8 +199,7 @@ static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.trsvcid);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
@@ -284,7 +283,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
if (port->disc_addr.trtype == nvmet_transport[i].type)
- return sprintf(page, "%s\n", nvmet_transport[i].name);
+ return snprintf(page, PAGE_SIZE,
+ "%s\n", nvmet_transport[i].name);
}
return sprintf(page, "\n");
@@ -586,7 +586,8 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
mutex_unlock(&ns->subsys->lock);
return -EINVAL;
}
- nvmet_ns_revalidate(ns);
+ if (nvmet_ns_revalidate(ns))
+ nvmet_ns_changed(ns->subsys, ns->nsid);
mutex_unlock(&ns->subsys->lock);
return count;
}
@@ -1233,44 +1234,6 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
-static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
- char *page)
-{
- return snprintf(page, PAGE_SIZE, "%s\n",
- nvmet_disc_subsys->subsysnqn);
-}
-
-static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct nvmet_subsys *subsys = to_subsys(item);
- char *subsysnqn;
- int len;
-
- len = strcspn(page, "\n");
- if (!len)
- return -EINVAL;
-
- subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
- if (!subsysnqn)
- return -ENOMEM;
-
- /*
- * The discovery NQN must be different from subsystem NQN.
- */
- if (!strcmp(subsysnqn, subsys->subsysnqn)) {
- kfree(subsysnqn);
- return -EBUSY;
- }
- down_write(&nvmet_config_sem);
- kfree(nvmet_disc_subsys->subsysnqn);
- nvmet_disc_subsys->subsysnqn = subsysnqn;
- up_write(&nvmet_config_sem);
-
- return count;
-}
-CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1300,7 +1263,6 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
- &nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5119c687de68..64c2d2f3e25c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -531,7 +531,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
ns->nsid);
}
-void nvmet_ns_revalidate(struct nvmet_ns *ns)
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
loff_t oldsize = ns->size;
@@ -540,8 +540,7 @@ void nvmet_ns_revalidate(struct nvmet_ns *ns)
else
nvmet_file_ns_revalidate(ns);
- if (oldsize != ns->size)
- nvmet_ns_changed(ns->subsys, ns->nsid);
+ return oldsize != ns->size;
}
int nvmet_ns_enable(struct nvmet_ns *ns)
@@ -1400,7 +1399,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (subsys->cntlid_min > subsys->cntlid_max)
goto out_free_sqs;
- ret = ida_simple_get(&cntlid_ida,
+ ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
@@ -1459,7 +1458,7 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
@@ -1493,8 +1492,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
- !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 22b5108168a6..de90001fc5c4 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,7 +1115,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (!assoc)
return NULL;
- idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
if (idx < 0)
goto out_free_assoc;
@@ -1157,7 +1157,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
out_put:
nvmet_fc_tgtport_put(tgtport);
out_ida:
- ida_simple_remove(&tgtport->assoc_cnt, idx);
+ ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
return NULL;
@@ -1183,7 +1183,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* if pending Rcv Disconnect Association LS, send rsp now */
if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
- ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ ida_free(&tgtport->assoc_cnt, assoc->a_id);
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
@@ -1341,7 +1341,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
}
/**
- * nvme_fc_register_targetport - transport entry point called by an
+ * nvmet_fc_register_targetport - transport entry point called by an
* LLDD to register the existence of a local
* NVME subsystem FC port.
* @pinfo: pointer to information about the port to be registered
@@ -1383,7 +1383,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_regtgt_failed;
}
- idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -1433,7 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
out_free_newrec:
put_device(dev);
out_ida_put:
- ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+ ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_regtgt_failed:
@@ -1460,7 +1460,7 @@ nvmet_fc_free_tgtport(struct kref *ref)
/* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port);
- ida_simple_remove(&nvmet_fc_tgtport_cnt,
+ ida_free(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt);
@@ -1604,7 +1604,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
}
/**
- * nvme_fc_unregister_targetport - transport entry point called by an
+ * nvmet_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered local NVME subsystem FC port.
* @target_port: pointer to the (registered) target port that is to be
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 70ca9dfc1771..d886c2c59554 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"
@@ -76,6 +77,14 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
+ /*
+	 * When the buffered_io namespace attribute is enabled, the user wants
+	 * this block device to be used as a file, so the block device can
+	 * take advantage of the page cache.
+ */
+ if (ns->buffered_io)
+ return -ENOTBLK;
+
ns->bdev = blkdev_get_by_path(ns->device_path,
FMODE_READ | FMODE_WRITE, NULL);
if (IS_ERR(ns->bdev)) {
@@ -267,15 +276,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ GFP_KERNEL);
}
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = op;
blk_start_plug(&plug);
if (req->metadata_len)
@@ -296,10 +305,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
}
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
- bio_set_dev(bio, req->ns->bdev);
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ op, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -328,11 +336,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6be6e59d273b..6485dc8eb974 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -14,16 +14,9 @@
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
-int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
- struct kstat stat;
- int ret;
-
- ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
- AT_STATX_FORCE_SYNC);
- if (!ret)
- ns->size = stat.size;
- return ret;
+ ns->size = i_size_read(ns->file->f_mapping->host);
}
void nvmet_file_ns_disable(struct nvmet_ns *ns)
@@ -43,7 +36,7 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
- int ret;
+ int ret = 0;
if (!ns->buffered_io)
flags |= O_DIRECT;
@@ -57,9 +50,7 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return ret;
}
- ret = nvmet_file_ns_revalidate(ns);
- if (ret)
- goto err;
+ nvmet_file_ns_revalidate(ns);
/*
* i_blkbits can be greater than the universally accepted upper bound,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index eb1094254c82..23f9d6f88804 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -543,11 +543,9 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
goto out_destroy_queues;
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tagset;
- }
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index af193423c10b..d910c6aad4b6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -541,8 +541,8 @@ u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
-int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
-void nvmet_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 9e5b89ae29df..a4de1e0d518b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -206,12 +206,13 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), req_op(rq));
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
+ bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+ GFP_KERNEL);
bio->bi_end_io = bio_put;
}
- bio->bi_opf = req_op(rq);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
@@ -253,11 +254,12 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
timeout = nvmet_req_subsys(req)->admin_timeout;
}
- rq = nvme_alloc_request(q, req->cmd, 0);
+ rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
}
+ nvme_init_request(rq, req->cmd);
if (timeout)
rq->timeout = timeout;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1deb4043e242..2446d0918a41 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1356,7 +1356,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
!queue->host_qid);
}
nvmet_rdma_free_rsps(queue);
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
kfree(queue);
}
@@ -1459,7 +1459,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list);
- queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_destroy_sq;
@@ -1510,7 +1510,7 @@ out_free_cmds:
out_free_responses:
nvmet_rdma_free_rsps(queue);
out_ida_remove:
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
@@ -1703,7 +1703,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
}
/**
- * nvme_rdma_device_removal() - Handle RDMA device removal
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
* @cm_id: rdma_cm id, used for nvmet port
* @queue: nvmet rdma queue (cm id qp_context)
*
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c1c43ce466b..83ca577f72be 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1473,7 +1473,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
page = virt_to_head_page(queue->pf_cache.va);
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
@@ -1613,7 +1613,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list);
- queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = queue->idx;
goto out_free_queue;
@@ -1646,7 +1646,7 @@ out_destroy_sq:
out_free_connect:
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
kfree(queue);
return ret;
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 46bc30fe85d2..e34718b09550 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -123,7 +123,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
goto done;
}
- nvmet_ns_revalidate(req->ns);
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
@@ -412,10 +416,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
while (sector < get_capacity(bdev->bd_disk)) {
if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
- bio = blk_next_bio(bio, 0, GFP_KERNEL);
- bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+ bio = blk_next_bio(bio, bdev, 0,
+ zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
/* This may take a while, so be nice to others */
cond_resched();
}
@@ -522,6 +526,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -551,14 +556,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
- bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
}
- bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sect;
bio->bi_private = req;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 23a38dcf0fc4..9fd1602b539d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (config->wp_gpio)
nvmem->wp_gpio = config->wp_gpio;
- else
+ else if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ad85ff6474ff..ec315b060cd5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
}
fdt_scan_reserved_mem();
- fdt_init_reserved_mem();
fdt_reserve_elfcorehdr();
+ fdt_init_reserved_mem();
}
/**
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 9c0fb962c22b..75caa6f5d36f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
+#include <linux/cma.h>
#include "of_private.h"
@@ -116,12 +117,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
if (IS_ENABLED(CONFIG_CMA)
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
&& of_get_flat_dt_prop(node, "reusable", NULL)
- && !nomap) {
- unsigned long order =
- max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
- align = max(align, (phys_addr_t)PAGE_SIZE << order);
- }
+ && !nomap)
+ align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 70992103c07d..2c2fb161b572 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void)
unittest(pdev, "device 2 creation failed\n");
EXPECT_BEGIN(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
irq = platform_get_irq(pdev, 0);
EXPECT_END(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 059566f54429..9be007c9420f 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_calls++;
#endif
- while(sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ccio_unmap_page(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e60690d38d67..374b9199878d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
- while (sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 489586a4cdc7..768d33f9ebc8 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -356,8 +356,8 @@ static int j721e_pcie_probe(struct platform_device *pdev)
const struct j721e_pcie_data *data;
struct cdns_pcie *cdns_pcie;
struct j721e_pcie *pcie;
- struct cdns_pcie_rc *rc;
- struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc = NULL;
+ struct cdns_pcie_ep *ep = NULL;
struct gpio_desc *gpiod;
void __iomem *base;
struct clk *clk;
@@ -376,6 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return 0;
+ }
+
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
@@ -426,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
- if (!bridge) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
-
- if (!data->byte_access_allowed)
- bridge->ops = &cdns_ti_pcie_host_ops;
- rc = pci_host_bridge_priv(bridge);
- rc->quirk_retrain_flag = data->quirk_retrain_flag;
- rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &rc->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
@@ -497,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
- ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &ep->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
dev_err(dev, "Failed to init phy\n");
@@ -525,8 +526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_pcie_setup;
break;
- default:
- dev_err(dev, "INVALID device type %d\n", mode);
}
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index fa6886d66488..c625fc6bb287 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -756,22 +756,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev)
return 0;
}
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
static const struct of_device_id kirin_pcie_match[] = {
- {
- .compatible = "hisilicon,kirin960-pcie",
- .data = (void *)PCIE_KIRIN_INTERNAL_PHY
- },
- {
- .compatible = "hisilicon,kirin970-pcie",
- .data = (void *)PCIE_KIRIN_EXTERNAL_PHY
- },
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
{},
};
static int kirin_pcie_probe(struct platform_device *pdev)
{
- enum pcie_kirin_phy_type phy_type;
struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
int ret;
@@ -781,13 +787,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- phy_type = (long)of_device_get_match_data(dev);
- if (!phy_type) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
-
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
return -ENOMEM;
@@ -800,7 +805,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->ops = &kirin_dw_pcie_ops;
pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
- kirin_pcie->type = phy_type;
+ kirin_pcie->type = data->phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 20ea2ee330b8..ae0bc2fee4ca 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
if (!hv_dev)
continue;
- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
put_pcichild(hv_dev);
}
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 71258ea3d35f..f8e82c5e2d87 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
* indirectly via kernel emulated PCI bridge driver.
*/
mvebu_pcie_setup_hw(port);
- mvebu_pcie_set_local_dev_nr(port, 0);
+ mvebu_pcie_set_local_dev_nr(port, 1);
+ mvebu_pcie_set_local_bus_nr(port, 0);
}
pcie->nports = i;
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 854d95163112..a2c3c207a04b 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq < 0)
return -ENOSPC;
- fwspec.param[1] += hwirq;
+ fwspec.param[fwspec.param_count - 2] += hwirq;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
if (ret)
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 3824862ea144..33eb37a2225c 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -109,15 +109,6 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
writel_relaxed(val, pcie->base + reg);
}
-static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
-{
- u32 val = readl_relaxed(pcie->base + reg);
-
- val &= ~clr;
- val |= set;
- writel_relaxed(val, pcie->base + reg);
-}
-
static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
return readl_relaxed(port->base + reg);
@@ -557,7 +548,7 @@ static struct platform_driver mt7621_pcie_driver = {
.remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
- .of_match_table = of_match_ptr(mt7621_pcie_ids),
+ .of_match_table = mt7621_pcie_ids,
},
};
builtin_platform_driver(mt7621_pcie_driver);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index cc166c683638..eb05cceab964 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -99,11 +99,13 @@ struct vmd_irq {
* @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
+ * @virq: The underlying VMD Linux interrupt number
*/
struct vmd_irq_list {
struct list_head irq_list;
struct srcu_struct srcu;
unsigned int count;
+ unsigned int virq;
};
struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *desc = arg->desc;
struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
- unsigned int index, vector;
if (!vmdirq)
return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
INIT_LIST_HEAD(&vmdirq->node);
vmdirq->irq = vmd_next_irq(vmd, desc);
vmdirq->virq = virq;
- index = index_from_irqs(vmd, vmdirq->irq);
- vector = pci_irq_vector(vmd->dev, index);
- irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+ irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
handle_untracked_irq, vmd, NULL);
return 0;
}
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
return err;
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
- err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+ vmd->irqs[i].virq = pci_irq_vector(dev, i);
+ err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
return 0;
}
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
int err, i;
for (i = 0; i < vmd->msix_count; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ err = devm_request_irq(dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 0d63541c4052..e9cf318e6670 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -28,6 +28,7 @@ void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
msi_domain_free_irqs_descs_locked(domain, &dev->dev);
else
pci_msi_legacy_teardown_msi_irqs(dev);
+ msi_free_msi_descs(&dev->dev);
}
/**
@@ -171,8 +172,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
- info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS |
- MSI_FLAG_FREE_MSI_DESCS;
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
info->flags |= MSI_FLAG_MUST_REACTIVATE;
diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c
index cdbb4689db78..db761adef652 100644
--- a/drivers/pci/msi/legacy.c
+++ b/drivers/pci/msi/legacy.c
@@ -77,5 +77,4 @@ void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
{
msi_device_destroy_sysfs(&dev->dev);
arch_teardown_msi_irqs(dev);
- msi_free_msi_descs(&dev->dev);
}
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c19c7ca58186..9037a7827eca 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -1111,7 +1111,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
if (!desc)
return cpu_possible_mask;
- if (WARN_ON_ONCE(!desc->affinity))
+	/* MSI[X] interrupts can be allocated without an affinity descriptor */
+ if (!desc->affinity)
return NULL;
/*
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 588588cfda48..4ceeb75fc899 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -350,7 +350,6 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
const struct pci_device_id *id)
{
int error, node, cpu;
- int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
struct drv_dev_and_id ddi = { drv, dev, id };
/*
@@ -368,17 +367,29 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* device is probed from work_on_cpu() of the Physical device.
*/
if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
- pci_physfn_is_probed(dev))
+ pci_physfn_is_probed(dev)) {
cpu = nr_cpu_ids;
- else
+ } else {
+ cpumask_var_t wq_domain_mask;
+
+ if (!zalloc_cpumask_var(&wq_domain_mask, GFP_KERNEL)) {
+ error = -ENOMEM;
+ goto out;
+ }
+ cpumask_and(wq_domain_mask,
+ housekeeping_cpumask(HK_TYPE_WQ),
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+
cpu = cpumask_any_and(cpumask_of_node(node),
- housekeeping_cpumask(hk_flags));
+ wq_domain_mask);
+ free_cpumask_var(wq_domain_mask);
+ }
if (cpu < nr_cpu_ids)
error = work_on_cpu(cpu, local_pci_probe, &ddi);
else
error = local_pci_probe(&ddi);
-
+out:
dev->is_probed = 0;
cpu_hotplug_enable();
return error;
@@ -596,7 +607,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
int error;
error = drv->suspend(pci_dev, state);
- suspend_report_result(drv->suspend, error);
+ suspend_report_result(dev, drv->suspend, error);
if (error)
return error;
@@ -775,7 +786,7 @@ static int pci_pm_suspend(struct device *dev)
int error;
error = pm->suspend(dev);
- suspend_report_result(pm->suspend, error);
+ suspend_report_result(dev, pm->suspend, error);
if (error)
return error;
@@ -821,7 +832,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
int error;
error = pm->suspend_noirq(dev);
- suspend_report_result(pm->suspend_noirq, error);
+ suspend_report_result(dev, pm->suspend_noirq, error);
if (error)
return error;
@@ -1010,7 +1021,7 @@ static int pci_pm_freeze(struct device *dev)
int error;
error = pm->freeze(dev);
- suspend_report_result(pm->freeze, error);
+ suspend_report_result(dev, pm->freeze, error);
if (error)
return error;
}
@@ -1030,7 +1041,7 @@ static int pci_pm_freeze_noirq(struct device *dev)
int error;
error = pm->freeze_noirq(dev);
- suspend_report_result(pm->freeze_noirq, error);
+ suspend_report_result(dev, pm->freeze_noirq, error);
if (error)
return error;
}
@@ -1116,7 +1127,7 @@ static int pci_pm_poweroff(struct device *dev)
int error;
error = pm->poweroff(dev);
- suspend_report_result(pm->poweroff, error);
+ suspend_report_result(dev, pm->poweroff, error);
if (error)
return error;
}
@@ -1154,7 +1165,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
int error;
error = pm->poweroff_noirq(dev);
- suspend_report_result(pm->poweroff_noirq, error);
+ suspend_report_result(dev, pm->poweroff_noirq, error);
if (error)
return error;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index bda630889f95..604feeb84ee4 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
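+	/* Start with every service IRQ marked as unassigned (-1) */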
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = -1;
+
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
@@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- int status, capabilities, irq_services, i, nr_service;
- int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
- [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
- };
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
/* Enable PCI Express port device */
status = pci_enable_device(dev);
@@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev)
return 0;
pci_set_master(dev);
-
- irq_services = 0;
- if (IS_ENABLED(CONFIG_PCIE_PME))
- irq_services |= PCIE_PORT_SERVICE_PME;
- if (IS_ENABLED(CONFIG_PCIEAER))
- irq_services |= PCIE_PORT_SERVICE_AER;
- if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
- irq_services |= PCIE_PORT_SERVICE_HP;
- if (IS_ENABLED(CONFIG_PCIE_DPC))
- irq_services |= PCIE_PORT_SERVICE_DPC;
- irq_services &= capabilities;
-
- if (irq_services) {
- /*
- * Initialize service IRQs. Don't use service devices that
- * require interrupts if there is no way to generate them.
- * However, some drivers may have a polling mode (e.g.
- * pciehp_poll_mode) that can be used in the absence of IRQs.
- * Allow them to determine if that is to be used.
- */
- status = pcie_init_service_irqs(dev, irqs, irq_services);
- if (status) {
- irq_services &= PCIE_PORT_SERVICE_HP;
- if (!irq_services)
- goto error_disable;
- }
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+ status = pcie_init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+ goto error_disable;
}
/* Allocate child services if any */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d2dd6a6cda60..65f7f6b0576c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
*/
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
- (pdev->device == 0x7341 && pdev->revision != 0x00))
- return;
-
if (pdev->device == 0x15d8) {
if (pdev->revision == 0xcf &&
pdev->subsystem_vendor == 0xea50 &&
@@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* AMD Iceland dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e1a0c44bc686..d05ca6ebbb9d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -141,11 +141,25 @@ config ARM_DMC620_PMU
config MARVELL_CN10K_TAD_PMU
tristate "Marvell CN10K LLC-TAD PMU"
- depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
help
Provides support for Last-Level cache Tag-and-data Units (LLC-TAD)
performance monitors on CN10K family silicons.
+config APPLE_M1_CPU_PMU
+ bool "Apple M1 CPU PMU support"
+ depends on ARM_PMU && ARCH_APPLE
+ help
+ Provides support for the non-architectural CPU PMUs present on
+ the Apple M1 SoCs and derivatives.
+
source "drivers/perf/hisilicon/Kconfig"
+config MARVELL_CN10K_DDR_PMU
+	tristate "Enable MARVELL CN10K DRAM Subsystem (DSS) PMU Support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+	  Enable perf support for the Marvell DDR performance monitoring
+	  events on the CN10K platform.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 2db5418d5b0a..4f43080ec54e 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
+obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
+obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
new file mode 100644
index 000000000000..979a7c2b4f56
--- /dev/null
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CPU PMU driver for the Apple M1 and derivatives
+ *
+ * Copyright (C) 2021 Google LLC
+ *
+ * Author: Marc Zyngier <maz@kernel.org>
+ *
+ * Most of the information used in this driver was provided by the
+ * Asahi Linux project. The rest was experimentally discovered.
+ */
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+#include <asm/apple_m1_pmu.h>
+#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+
+#define M1_PMU_NR_COUNTERS 10
+
+#define M1_PMU_CFG_EVENT GENMASK(7, 0)
+
+#define ANY_BUT_0_1 GENMASK(9, 2)
+#define ONLY_2_TO_7 GENMASK(7, 2)
+#define ONLY_2_4_6 (BIT(2) | BIT(4) | BIT(6))
+#define ONLY_5_6_7 (BIT(5) | BIT(6) | BIT(7))
+
+/*
+ * Description of the events we actually know about, as well as those with
+ * a specific counter affinity. Yes, this is a grand total of two known
+ * counters, and the rest is anybody's guess.
+ *
+ * Not all counters can count all events. Counters #0 and #1 are wired to
+ * count cycles and instructions respectively, and some events have
+ * bizarre mappings (every other counter, or even *one* counter). These
+ * restrictions equally apply to both P and E cores.
+ *
+ * It is worth noting that the PMUs attached to P and E cores are likely
+ * to be different because the underlying uarches are different. At the
+ * moment, we don't really need to distinguish between the two because we
+ * know next to nothing about the events themselves, and we already have
+ * per cpu-type PMU abstractions.
+ *
+ * If we eventually find out that the events are different across
+ * implementations, we'll have to introduce per cpu-type tables.
+ */
+enum m1_pmu_events {
+ M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
+ M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
+ M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
+ M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
+ M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
+ M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
+ M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
+ M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
+ M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
+ M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
+ M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
+ M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
+ M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
+ M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
+ M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
+ M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
+ M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
+ M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
+ M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
+ M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
+ M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
+ M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
+ M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
+ M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
+ M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
+ M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
+ M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
+ M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
+ M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
+ M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
+ M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
+ M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
+ M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
+
+ /*
+ * From this point onwards, these are not actual HW events,
+ * but attributes that get stored in hw->config_base.
+ */
+ M1_PMU_CFG_COUNT_USER = BIT(8),
+ M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+};
+
+/*
+ * Per-event affinity table. Most events can be installed on counters
+ * 2-9, but there are a number of exceptions. Note that this table
+ * has been created experimentally, and I wouldn't be surprised if more
+ * counters had strange affinities.
+ */
+static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
+ [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
+ [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
+ [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
+ [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
+ [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
+ [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
+};
+
+static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
+ /* No idea about the rest yet */
+};
+
+/* sysfs definitions */
+static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+#define M1_PMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
+
+static struct attribute *m1_pmu_event_attrs[] = {
+ M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
+ M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
+ NULL,
+};
+
+static const struct attribute_group m1_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = m1_pmu_event_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *m1_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static const struct attribute_group m1_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = m1_pmu_format_attrs,
+};
+
+/* Low level accessors. No synchronisation. */
+#define PMU_READ_COUNTER(_idx) \
+ case _idx: return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)
+
+#define PMU_WRITE_COUNTER(_val, _idx) \
+ case _idx: \
+ write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1); \
+ return
+
+static u64 m1_pmu_read_hw_counter(unsigned int index)
+{
+ switch (index) {
+ PMU_READ_COUNTER(0);
+ PMU_READ_COUNTER(1);
+ PMU_READ_COUNTER(2);
+ PMU_READ_COUNTER(3);
+ PMU_READ_COUNTER(4);
+ PMU_READ_COUNTER(5);
+ PMU_READ_COUNTER(6);
+ PMU_READ_COUNTER(7);
+ PMU_READ_COUNTER(8);
+ PMU_READ_COUNTER(9);
+ }
+
+ BUG();
+}
+
+static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
+{
+ switch (index) {
+ PMU_WRITE_COUNTER(val, 0);
+ PMU_WRITE_COUNTER(val, 1);
+ PMU_WRITE_COUNTER(val, 2);
+ PMU_WRITE_COUNTER(val, 3);
+ PMU_WRITE_COUNTER(val, 4);
+ PMU_WRITE_COUNTER(val, 5);
+ PMU_WRITE_COUNTER(val, 6);
+ PMU_WRITE_COUNTER(val, 7);
+ PMU_WRITE_COUNTER(val, 8);
+ PMU_WRITE_COUNTER(val, 9);
+ }
+
+ BUG();
+}
+
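+/* Bit position of counter @index within the register field described by @mask */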
+#define get_bit_offset(index, mask) (__ffs(mask) + (index))
+
+static void __m1_pmu_enable_counter(unsigned int index, bool en)
+{
+ u64 val, bit;
+
+ switch (index) {
+ case 0 ... 7:
+ bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
+ break;
+ case 8 ... 9:
+ bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+
+ if (en)
+ val |= bit;
+ else
+ val &= ~bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+}
+
+static void m1_pmu_enable_counter(unsigned int index)
+{
+ __m1_pmu_enable_counter(index, true);
+}
+
+static void m1_pmu_disable_counter(unsigned int index)
+{
+ __m1_pmu_enable_counter(index, false);
+}
+
+static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
+{
+ u64 val, bit;
+
+ switch (index) {
+ case 0 ... 7:
+ bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
+ break;
+ case 8 ... 9:
+ bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+
+ if (en)
+ val |= bit;
+ else
+ val &= ~bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+}
+
+static void m1_pmu_enable_counter_interrupt(unsigned int index)
+{
+ __m1_pmu_enable_counter_interrupt(index, true);
+}
+
+static void m1_pmu_disable_counter_interrupt(unsigned int index)
+{
+ __m1_pmu_enable_counter_interrupt(index, false);
+}
+
+static void m1_pmu_configure_counter(unsigned int index, u8 event,
+ bool user, bool kernel)
+{
+ u64 val, user_bit, kernel_bit;
+ int shift;
+
+ switch (index) {
+ case 0 ... 7:
+ user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
+ kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
+ break;
+ case 8 ... 9:
+ user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
+ kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
+
+ if (user)
+ val |= user_bit;
+ else
+ val &= ~user_bit;
+
+ if (kernel)
+ val |= kernel_bit;
+ else
+ val &= ~kernel_bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+
+ /*
+ * Counters 0 and 1 have fixed events. For anything else,
+ * place the event at the expected location in the relevant
+ * register (PMESR0 holds the event configuration for counters
+ * 2-5, resp. PMESR1 for counters 6-9).
+ */
+ switch (index) {
+ case 0 ... 1:
+ break;
+ case 2 ... 5:
+ shift = (index - 2) * 8;
+ val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
+ val &= ~((u64)0xff << shift);
+ val |= (u64)event << shift;
+ write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ break;
+ case 6 ... 9:
+ shift = (index - 6) * 8;
+ val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
+ val &= ~((u64)0xff << shift);
+ val |= (u64)event << shift;
+ write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ break;
+ }
+}
+
+/* arm_pmu backend */
+static void m1_pmu_enable_event(struct perf_event *event)
+{
+ bool user, kernel;
+ u8 evt;
+
+ evt = event->hw.config_base & M1_PMU_CFG_EVENT;
+ user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
+ kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
+
+ m1_pmu_disable_counter_interrupt(event->hw.idx);
+ m1_pmu_disable_counter(event->hw.idx);
+ isb();
+
+ m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_enable_counter(event->hw.idx);
+ m1_pmu_enable_counter_interrupt(event->hw.idx);
+ isb();
+}
+
+static void m1_pmu_disable_event(struct perf_event *event)
+{
+ m1_pmu_disable_counter_interrupt(event->hw.idx);
+ m1_pmu_disable_counter(event->hw.idx);
+ isb();
+}
+
+static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ u64 overflow, state;
+ int idx;
+
+ overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
+ if (!overflow) {
+ /* Spurious interrupt? */
+ state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+ state &= ~PMCR0_IACT;
+ write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
+ isb();
+ return IRQ_NONE;
+ }
+
+ cpu_pmu->stop(cpu_pmu);
+
+ regs = get_irq_regs();
+
+ for (idx = 0; idx < cpu_pmu->num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ struct perf_sample_data data;
+
+ if (!event)
+ continue;
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ m1_pmu_disable_event(event);
+ }
+
+ cpu_pmu->start(cpu_pmu);
+
+ return IRQ_HANDLED;
+}
+
+static u64 m1_pmu_read_counter(struct perf_event *event)
+{
+ return m1_pmu_read_hw_counter(event->hw.idx);
+}
+
+static void m1_pmu_write_counter(struct perf_event *event, u64 value)
+{
+ m1_pmu_write_hw_counter(value, event->hw.idx);
+ isb();
+}
+
+static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
+ unsigned long affinity = m1_pmu_event_affinity[evtype];
+ int idx;
+
+ /*
+ * Place the event on the first free counter that can count
+ * this event.
+ *
+ * We could do a better job if we had a view of all the events
+ * counting on the PMU at any given time and placed the most
+ * constraining events first.
+ */
+ for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ return -EAGAIN;
+}
+
+static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
+static void __m1_pmu_set_mode(u8 mode)
+{
+ u64 val;
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+ val &= ~(PMCR0_IMODE | PMCR0_IACT);
+ val |= FIELD_PREP(PMCR0_IMODE, mode);
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+ isb();
+}
+
+static void m1_pmu_start(struct arm_pmu *cpu_pmu)
+{
+ __m1_pmu_set_mode(PMCR0_IMODE_FIQ);
+}
+
+static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ __m1_pmu_set_mode(PMCR0_IMODE_OFF);
+}
+
+static int m1_pmu_map_event(struct perf_event *event)
+{
+ /*
+ * Although the counters are 48 bits wide, bit 47 is what
+ * triggers the overflow interrupt. Advertise the counters
+ * as 47 bits wide to mimic the behaviour of the ARM PMU.
+ */
+ event->hw.flags |= ARMPMU_EVT_47BIT;
+ return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
+}
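
The ARMPMU_EVT_47BIT flag set here pairs with the arm_pmu change later in this patch: arm_pmu_event_max_period() then returns GENMASK_ULL(46, 0), i.e. 2^47 - 1, so a reprogrammed counter always starts with bit 47 clear and the overflow interrupt mentioned in the comment fires when that bit next becomes set.
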
+
+static void m1_pmu_reset(void *info)
+{
+ int i;
+
+ __m1_pmu_set_mode(PMCR0_IMODE_OFF);
+
+ for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
+ m1_pmu_disable_counter(i);
+ m1_pmu_disable_counter_interrupt(i);
+ m1_pmu_write_hw_counter(0, i);
+ }
+
+ isb();
+}
+
+static int m1_pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (!attr->exclude_guest)
+ return -EINVAL;
+ if (!attr->exclude_kernel)
+ config_base |= M1_PMU_CFG_COUNT_KERNEL;
+ if (!attr->exclude_user)
+ config_base |= M1_PMU_CFG_COUNT_USER;
+
+ event->config_base = config_base;
+
+ return 0;
+}
+
+static int m1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->handle_irq = m1_pmu_handle_irq;
+ cpu_pmu->enable = m1_pmu_enable_event;
+ cpu_pmu->disable = m1_pmu_disable_event;
+ cpu_pmu->read_counter = m1_pmu_read_counter;
+ cpu_pmu->write_counter = m1_pmu_write_counter;
+ cpu_pmu->get_event_idx = m1_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx;
+ cpu_pmu->start = m1_pmu_start;
+ cpu_pmu->stop = m1_pmu_stop;
+ cpu_pmu->map_event = m1_pmu_map_event;
+ cpu_pmu->reset = m1_pmu_reset;
+ cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
+
+ cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
+ return 0;
+}
+
+/* Device driver gunk */
+static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_icestorm_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
+static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_firestorm_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
+static const struct of_device_id m1_pmu_of_device_ids[] = {
+ { .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
+ { .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
+
+static int m1_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver m1_pmu_driver = {
+ .driver = {
+ .name = "apple-m1-cpu-pmu",
+ .of_match_table = m1_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = m1_pmu_device_probe,
+};
+
+module_platform_driver(m1_pmu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 54aca3a62814..96e09fa40909 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1096,7 +1096,7 @@ static void cci_pmu_enable(struct pmu *pmu)
{
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
- int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
+ bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
unsigned long flags;
if (!enabled)
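
This conversion, repeated below for arm_pmu, the HiSilicon uncore PMU and the X-Gene PMU, only changes how "any counter in use?" is computed: bitmap_empty() can stop at the first non-zero word, whereas bitmap_weight() always counts every bit. A minimal stand-alone illustration (not driver code):

#include <linux/bitmap.h>

/* True if at least one counter in 'used_mask' is claimed. */
static bool any_counter_in_use(const unsigned long *used_mask, unsigned int nr)
{
	/* Equivalent to bitmap_weight(used_mask, nr) > 0, but cheaper. */
	return !bitmap_empty(used_mask, nr);
}
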
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index a96c31604545..40b352e8aa7f 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1460,8 +1460,7 @@ static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
static int arm_ccn_probe(struct platform_device *pdev)
{
struct arm_ccn *ccn;
- struct resource *res;
- unsigned int irq;
+ int irq;
int err;
ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
@@ -1474,10 +1473,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
if (IS_ERR(ccn->base))
return PTR_ERR(ccn->base);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -EINVAL;
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/* Check if we can use the interrupt */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 0e48adce57ef..9c1d82be7a2f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -71,9 +71,11 @@
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
-#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
-#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
-#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
+#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
+#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
+#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
+#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
+#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
@@ -155,6 +157,7 @@
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
+/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
@@ -353,7 +356,7 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
return NULL;
}
-struct dentry *arm_cmn_debugfs;
+static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
@@ -595,6 +598,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
+ if (chan == 4 && cmn->model == CMN600)
+ return 0;
+
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2))
return 0;
@@ -905,15 +911,18 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
u32 grp = CMN_EVENT_WP_GRP(event);
u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
u32 combine = CMN_EVENT_WP_COMBINE(event);
+ bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
- FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
+ if (exc)
+ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
+ CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
if (combine && !grp)
- config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
-
+ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
+ CMN_DTM_WPn_CONFIG_WP_COMBINE;
return config;
}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 295cc7952d0e..9694370651fa 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
if (event->hw.flags & ARMPMU_EVT_64BIT)
return GENMASK_ULL(63, 0);
+ else if (event->hw.flags & ARMPMU_EVT_47BIT)
+ return GENMASK_ULL(46, 0);
else
return GENMASK_ULL(31, 0);
}
@@ -524,7 +526,7 @@ static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
@@ -785,7 +787,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
{
struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return NOTIFY_DONE;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index a738aeab5c04..358e4e284a62 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
- int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask,
+ bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask,
hisi_pmu->num_counters);
if (!enabled)
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
new file mode 100644
index 000000000000..665b382a0ee3
--- /dev/null
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/hrtimer.h>
+
+/* Performance Counters Operating Mode Control Registers */
+#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+
+/* Performance Counters Start Operation Control Registers */
+#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define START_OP_CTRL_VAL_START 0x1ULL
+#define START_OP_CTRL_VAL_ACTIVE 0x2
+
+/* Performance Counters End Operation Control Registers */
+#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define END_OP_CTRL_VAL_END 0x1ULL
+
+/* Performance Counters End Status Registers */
+#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
+
+/* Performance Counters Configuration Registers */
+#define DDRC_PERF_CFG_BASE 0x8040
+
+/* 8 generic event counters + 2 fixed event counters */
+#define DDRC_PERF_NUM_GEN_COUNTERS 8
+#define DDRC_PERF_NUM_FIX_COUNTERS 2
+#define DDRC_PERF_READ_COUNTER_IDX DDRC_PERF_NUM_GEN_COUNTERS
+#define DDRC_PERF_WRITE_COUNTER_IDX (DDRC_PERF_NUM_GEN_COUNTERS + 1)
+#define DDRC_PERF_NUM_COUNTERS (DDRC_PERF_NUM_GEN_COUNTERS + \
+ DDRC_PERF_NUM_FIX_COUNTERS)
+
+/* Generic event counter registers */
+#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define EVENT_ENABLE BIT_ULL(63)
+
+/* Two dedicated event counters for DDR reads and writes */
+#define EVENT_DDR_READS 101
+#define EVENT_DDR_WRITES 100
+
+/*
+ * Programmable event IDs for the programmable event counters.
+ * DO NOT change these event-id numbers; they are used to
+ * program the event bitmap in hardware.
+ */
+#define EVENT_OP_IS_ZQLATCH 55
+#define EVENT_OP_IS_ZQSTART 54
+#define EVENT_OP_IS_TCR_MRR 53
+#define EVENT_OP_IS_DQSOSC_MRR 52
+#define EVENT_OP_IS_DQSOSC_MPC 51
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR 50
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD 49
+#define EVENT_BSM_STARVATION 48
+#define EVENT_BSM_ALLOC 47
+#define EVENT_LPR_REQ_WITH_NOCREDIT 46
+#define EVENT_HPR_REQ_WITH_NOCREDIT 45
+#define EVENT_OP_IS_ZQCS 44
+#define EVENT_OP_IS_ZQCL 43
+#define EVENT_OP_IS_LOAD_MODE 42
+#define EVENT_OP_IS_SPEC_REF 41
+#define EVENT_OP_IS_CRIT_REF 40
+#define EVENT_OP_IS_REFRESH 39
+#define EVENT_OP_IS_ENTER_MPSM 35
+#define EVENT_OP_IS_ENTER_POWERDOWN 31
+#define EVENT_OP_IS_ENTER_SELFREF 27
+#define EVENT_WAW_HAZARD 26
+#define EVENT_RAW_HAZARD 25
+#define EVENT_WAR_HAZARD 24
+#define EVENT_WRITE_COMBINE 23
+#define EVENT_RDWR_TRANSITIONS 22
+#define EVENT_PRECHARGE_FOR_OTHER 21
+#define EVENT_PRECHARGE_FOR_RDWR 20
+#define EVENT_OP_IS_PRECHARGE 19
+#define EVENT_OP_IS_MWR 18
+#define EVENT_OP_IS_WR 17
+#define EVENT_OP_IS_RD 16
+#define EVENT_OP_IS_RD_ACTIVATE 15
+#define EVENT_OP_IS_RD_OR_WR 14
+#define EVENT_OP_IS_ACTIVATE 13
+#define EVENT_WR_XACT_WHEN_CRITICAL 12
+#define EVENT_LPR_XACT_WHEN_CRITICAL 11
+#define EVENT_HPR_XACT_WHEN_CRITICAL 10
+#define EVENT_DFI_RD_DATA_CYCLES 9
+#define EVENT_DFI_WR_DATA_CYCLES 8
+#define EVENT_ACT_BYPASS 7
+#define EVENT_READ_BYPASS 6
+#define EVENT_HIF_HI_PRI_RD 5
+#define EVENT_HIF_RMW 4
+#define EVENT_HIF_RD 3
+#define EVENT_HIF_WR 2
+#define EVENT_HIF_RD_OR_WR 1
+
+/* Event counter value registers */
+#define DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+
+/* Fixed event counter enable/disable register */
+#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define DDRC_PERF_FREERUN_WRITE_EN 0x1
+#define DDRC_PERF_FREERUN_READ_EN 0x2
+
+/* Fixed event counter control register */
+#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
+#define DDRC_FREERUN_READ_CNT_CLR 0x2
+
+/* Fixed event counter value register */
+#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
+#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+
+struct cn10k_ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct device *dev;
+ int active_events;
+ struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
+ struct hrtimer hrtimer;
+ struct hlist_node node;
+};
+
+#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+
+static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+
+}
+
+#define CN10K_DDR_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id)
+
+static struct attribute *cn10k_ddr_perf_events_attrs[] = {
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
+ EVENT_HPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
+ EVENT_LPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group cn10k_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = cn10k_ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-8");
+
+static struct attribute *cn10k_ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = cn10k_ddr_perf_format_attrs,
+};
+
+static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute cn10k_ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);
+
+static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
+ &cn10k_ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
+ .attrs = cn10k_ddr_perf_cpumask_attrs,
+};
+
+static const struct attribute_group *cn10k_attr_groups[] = {
+ &cn10k_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL,
+};
+
+/* Default poll timeout is 100 sec, which is ample for a 48-bit
+ * counter incrementing at up to 5.6 GT/s; an overflow would take
+ * many hours.
+ */
+static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
+module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
+
+static ktime_t cn10k_ddr_pmu_timer_period(void)
+{
+ return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * MSEC_PER_SEC);
+}
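
For scale: a 48-bit counter incrementing at 5.6 GT/s wraps after roughly 2^48 / 5.6e9 ≈ 5 x 10^4 seconds, i.e. about 14 hours, so the 100-second default polling period leaves a very wide margin.
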
+
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+{
+ switch (eventid) {
+ case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
+ case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
+ *event_bitmap = (1ULL << (eventid - 1));
+ break;
+ case EVENT_OP_IS_ENTER_SELFREF:
+ case EVENT_OP_IS_ENTER_POWERDOWN:
+ case EVENT_OP_IS_ENTER_MPSM:
+ *event_bitmap = (0xFULL << (eventid - 1));
+ break;
+ default:
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
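
Two concrete values may make the mapping clearer: a single-bit event such as EVENT_HIF_RD (id 3) yields an event bitmap of 1ULL << 2 = 0x4, while the self-refresh/power-down/MPSM ids select a 4-bit field, e.g. EVENT_OP_IS_ENTER_SELFREF (id 27) yields 0xFULL << 26.
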
+
+static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
+ struct perf_event *event)
+{
+ u8 config = event->attr.config;
+ int i;
+
+ /* DDR read free-run counter index */
+ if (config == EVENT_DDR_READS) {
+ pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
+ return DDRC_PERF_READ_COUNTER_IDX;
+ }
+
+ /* DDR write free-run counter index */
+ if (config == EVENT_DDR_WRITES) {
+ pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
+ return DDRC_PERF_WRITE_COUNTER_IDX;
+ }
+
+ /* Allocate DDR generic counters */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL) {
+ pmu->events[i] = event;
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static int cn10k_ddr_perf_event_init(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event)) {
+ dev_info(pmu->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We must NOT create groups containing mixed PMUs */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ /* Set ownership of the event to one CPU; the same event cannot
+ * be observed on multiple CPUs at the same time.
+ */
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+ return 0;
+}
+
+static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
+ int counter, bool enable)
+{
+ u32 reg;
+ u64 val;
+
+ if (counter > DDRC_PERF_NUM_COUNTERS) {
+ pr_err("Error: unsupported counter %d\n", counter);
+ return;
+ }
+
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ reg = DDRC_PERF_CFG(counter);
+ val = readq_relaxed(pmu->base + reg);
+
+ if (enable)
+ val |= EVENT_ENABLE;
+ else
+ val &= ~EVENT_ENABLE;
+
+ writeq_relaxed(val, pmu->base + reg);
+ } else {
+ val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ if (enable) {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+ }
+ writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ }
+}
+
+static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
+{
+ u64 val;
+
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+
+ if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
+ return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+
+ val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ return val;
+}
+
+static void cn10k_ddr_perf_event_update(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_count, new_count, mask;
+
+ do {
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+ } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
+
+ mask = DDRC_PERF_CNT_MAX_VALUE;
+
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
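
The mask keeps the delta correct across a counter wrap: if prev_count sits at DDRC_PERF_CNT_MAX_VALUE and the hardware wraps around to 5, (new_count - prev_count) & mask evaluates to 6, which is exactly the number of increments that occurred.
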
+
+static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, true);
+
+ hwc->state = 0;
+}
+
+static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u8 config = event->attr.config;
+ int counter, ret;
+ u32 reg_offset;
+ u64 val;
+
+ counter = cn10k_ddr_perf_alloc_counter(pmu, event);
+ if (counter < 0)
+ return -EAGAIN;
+
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ if (pmu->active_events == 1)
+ hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
+ HRTIMER_MODE_REL_PINNED);
+
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ /* Generic counters, configure event id */
+ reg_offset = DDRC_PERF_CFG(counter);
+ ret = ddr_perf_get_event_bitmap(config, &val);
+ if (ret)
+ return ret;
+
+ writeq_relaxed(val, pmu->base + reg_offset);
+ } else {
+ /* fixed event counter, clear counter value */
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ else
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+
+ writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ }
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ cn10k_ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, false);
+
+ if (flags & PERF_EF_UPDATE)
+ cn10k_ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ cn10k_ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+
+ /* Cancel timer when no events to capture */
+ if (pmu->active_events == 0)
+ hrtimer_cancel(&pmu->hrtimer);
+}
+
+static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_CNT_START_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_CNT_END_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
+{
+ struct hw_perf_event *hwc;
+ int i;
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ cn10k_ddr_perf_event_update(pmu->events[i]);
+ }
+
+ /* Reset stored prev_count values, as the h/w counters are reset */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ hwc = &pmu->events[i]->hw;
+ local64_set(&hwc->prev_count, 0);
+ }
+}
+
+static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
+{
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ u64 prev_count, new_count;
+ u64 value;
+ int i;
+
+ event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+ /* The counter has overflowed when the new count is
+ * less than the previous count.
+ */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+ /* The counter has overflowed when the new count is
+ * less than the previous count.
+ */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ value = cn10k_ddr_perf_read_counter(pmu, i);
+ if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ pr_info("Counter-(%d) reached max value\n", i);
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
+ hrtimer);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cn10k_ddr_pmu_overflow_handler(pmu);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
+ node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+ return 0;
+}
+
+static int cn10k_ddr_perf_probe(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu;
+ struct resource *res;
+ void __iomem *base;
+ char *name;
+ int ret;
+
+ ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
+ if (!ddr_pmu)
+ return -ENOMEM;
+
+ ddr_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ddr_pmu);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ddr_pmu->base = base;
+
+ /* Set up the PMU counters to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
+ DDRC_PERF_CNT_OP_MODE_CTRL);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+
+ /* Choose this CPU to collect perf data */
+ ddr_pmu->cpu = raw_smp_processor_id();
+
+ name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx",
+ res->start);
+ if (!name)
+ return -ENOMEM;
+
+ hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+
+ cpuhp_state_add_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
+ if (ret)
+ goto error;
+
+ pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ return 0;
+error:
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+ return ret;
+}
+
+static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ perf_pmu_unregister(&ddr_pmu->pmu);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
+ { .compatible = "marvell,cn10k-ddr-pmu", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
+#endif
+
+static struct platform_driver cn10k_ddr_pmu_driver = {
+ .driver = {
+ .name = "cn10k-ddr-pmu",
+ .of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = cn10k_ddr_perf_probe,
+ .remove = cn10k_ddr_perf_remove,
+};
+
+static int __init cn10k_ddr_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ "perf/marvell/cn10k/ddr:online", NULL,
+ cn10k_ddr_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&cn10k_ddr_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+ return ret;
+}
+
+static void __exit cn10k_ddr_pmu_exit(void)
+{
+ platform_driver_unregister(&cn10k_ddr_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+}
+
+module_init(cn10k_ddr_pmu_init);
+module_exit(cn10k_ddr_pmu_exit);
+
+MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_LICENSE("GPL v2");
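
Once registered, the PMU appears under /sys/bus/event_source/devices/ as mrvl_ddr_pmu_<base address> with the event, format and cpumask attributes defined above. Counting is system-wide only, since per-task and sampling use are rejected in cn10k_ddr_perf_event_init(), so a typical invocation would be along the lines of perf stat -e mrvl_ddr_pmu_<addr>/ddr_cam_read/ -a.
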
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 7f4d292658e3..ee67305f822d 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -368,10 +368,12 @@ static int tad_pmu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
{ .compatible = "marvell,cn10k-tad-pmu", },
{},
};
+#endif
static struct platform_driver tad_pmu_driver = {
.driver = {
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 05378c0fd8f3..1edb9c03704f 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -887,13 +887,11 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct tx2_uncore_pmu *tx2_pmu;
- struct acpi_device *adev;
enum tx2_uncore_type type;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
type = get_tx2_pmu_type(adev);
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 2b6d476bd213..0c32dffc7ede 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -867,7 +867,7 @@ static void xgene_perf_pmu_enable(struct pmu *pmu)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
- int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
+ bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
pmu_dev->max_counters);
if (!enabled)
@@ -1549,14 +1549,12 @@ static const struct acpi_device_id *xgene_pmu_acpi_match_type(
static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
const struct acpi_device_id *acpi_id;
struct xgene_pmu *xgene_pmu = data;
struct xgene_pmu_dev_ctx *ctx;
- struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index cd2332bf0e31..fdbd64c03e12 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy)
(DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
(DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
- DIV_ROUND_UP(priv->config.clk_pre, temp));
+ DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE));
regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
DIV_ROUND_UP(priv->config.hs_exit, temp) |
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index f81e23742079..849c4204f550 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -97,8 +97,7 @@ config PHY_BRCM_USB
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908
- default ARCH_BRCMSTB
+ default ARCH_BCM4908 || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 116fb23aebd9..0f1deb6e0eab 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -18,6 +18,7 @@
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/suspend.h>
#include "phy-brcm-usb-init.h"
@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
int init_count;
int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+ struct notifier_block pm_notifier;
+ bool pm_active;
};
static s8 *node_reg_names[BRCM_REGS_MAX] = {
"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
};
+static int brcm_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct brcm_usb_phy_data *priv =
+ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ priv->pm_active = true;
+ break;
+ case PM_POST_RESTORE:
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ priv->pm_active = false;
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
{
struct phy *gphy = dev_id;
@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
/*
* Use a lock to make sure a second caller waits until
* the base phy is inited before using it.
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
dev_dbg(&gphy->dev, "EXIT\n");
if (phy->id == BRCM_USB_PHY_2_0)
brcm_usb_uninit_eohci(&priv->ini);
@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
return err;
+ priv->pm_notifier.notifier_call = brcm_pm_notifier;
+ register_pm_notifier(&priv->pm_notifier);
+
mutex_init(&priv->mutex);
/* make sure invert settings are correct */
@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
static int brcm_usb_phy_remove(struct platform_device *pdev)
{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+ unregister_pm_notifier(&priv->pm_notifier);
return 0;
}
@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
+ dev_dbg(dev, "SUSPEND\n");
priv->ini.wake_enabled = device_may_wakeup(dev);
if (priv->phys[BRCM_USB_PHY_3_0].inited)
brcm_usb_uninit_xhci(&priv->ini);
@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ dev_dbg(dev, "RESUME\n");
if (priv->wake_irq >= 0)
disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index da24acd26666..e265647e29a2 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct cdns_sierra_data *data;
unsigned int id_value;
- int i, ret, node = 0;
+ int ret, node = 0;
void __iomem *base;
struct device_node *dn = dev->of_node, *child;
@@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- goto put_child2;
+ of_node_put(child);
+ goto put_control;
}
if (!sp->autoconf) {
@@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
}
@@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
@@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
- goto put_child2;
+ goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1) {
ret = cdns_sierra_phy_configure_multilink(sp);
if (ret)
- goto put_child2;
+ goto put_control;
}
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
- node++;
-put_child2:
- for (i = 0; i < node; i++)
- reset_control_put(sp->phys[i].lnk_rst);
- of_node_put(child);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
clk_disable:
cdns_sierra_phy_disable_clocks(sp);
reset_control_assert(sp->apb_rst);
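
The reworked error path follows the usual reverse-unwind idiom: the resource belonging to the failing node is released on the spot, then put_control drops whatever was acquired for the earlier nodes in reverse order. A generic, stand-alone sketch of the idiom (plain C, illustrative names only):

#include <stdlib.h>

/* On failure, release everything acquired so far in reverse order. */
static int acquire_all(void **res, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		res[i] = malloc(64);	/* stands in for reset_control_get() etc. */
		if (!res[i])
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free(res[i]);		/* mirrors the put_control: loop above */
	return -1;
}
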
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 6d307102f4f6..8ee7682b8e93 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
/* no efuse, ignore it */
if (!instance->efuse_intr &&
!instance->efuse_rx_imp &&
- !instance->efuse_rx_imp) {
+ !instance->efuse_tx_imp) {
dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
instance->efuse_sw_en = 0;
break;
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 288c9c67aa74..ccb4045685cd 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
cfg->clk_miss = 0;
cfg->clk_post = 60000 + 52 * ui;
- cfg->clk_pre = 8000;
+ cfg->clk_pre = 8;
cfg->clk_prepare = 38000;
cfg->clk_settle = 95000;
cfg->clk_term_en = 0;
@@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
if (cfg->clk_post < (60000 + 52 * ui))
return -EINVAL;
- if (cfg->clk_pre < 8000)
+ if (cfg->clk_pre < 8)
return -EINVAL;
if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000)
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 347dc79a18c1..630e01b5c19b 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -5,6 +5,7 @@
* Author: Wyon Bi <bivvy.bi@rock-chips.com>
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
@@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
* The value of counter for HS Tlpx Time
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 2ce9bfd783d4..007a23c78d56 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
ret = __stm32_usbphyc_pll_disable(usbphyc);
if (ret)
- return ret;
+ goto dec_n_pll_cons;
}
ret = stm32_usbphyc_regulators_enable(usbphyc);
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index b3384c31637a..da546c35d1d5 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = {
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
{ .val = 3, .div = 8, },
+ { /* sentinel */ },
};
static const struct wiz_clk_div_sel clk_div_sel[] = {
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index f478d8a17115..9be9535ad7ab 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -134,7 +134,8 @@
#define PROT_BUS_WIDTH_10 0x0
#define PROT_BUS_WIDTH_20 0x1
#define PROT_BUS_WIDTH_40 0x2
-#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2)
+#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2)
/* Number of GT lanes */
#define NUM_LANES 4
@@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
+ u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
/* Set SGMII protocol TX and RX bus width to 10 bits. */
- xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
- xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
+ xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}
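
For lane 1, for example, PROT_BUS_WIDTH_SHIFT(1) is 2 and PROT_BUS_WIDTH_MASK(1) is GENMASK(3, 2), so the read-modify-write above only touches that lane's two-bit field; the previous code shifted by the same amount but wrote the whole TX/RX_PROT_BUS_WIDTH register, clobbering the width settings of the other three lanes.
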
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 08c364d611f5..f64d29f614ec 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -42,9 +42,9 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
+obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
-obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 5123f4c33854..ac1e400bbbac 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -35,6 +35,7 @@ config PINCTRL_BCM63XX
select PINCONF
select GENERIC_PINCONF
select GPIOLIB
+ select REGMAP
select GPIO_REGMAP
config PINCTRL_BCM6318
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index c4ebfa852b42..47e433e09c5c 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1269,16 +1269,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_remove;
}
if (is_7211) {
pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
sizeof(*pc->wake_irq),
GFP_KERNEL);
- if (!pc->wake_irq)
- return -ENOMEM;
+ if (!pc->wake_irq) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
}
/*
@@ -1306,8 +1308,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
len = strlen(dev_name(pc->dev)) + 16;
name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
+ if (!name) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
@@ -1326,11 +1330,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
err = gpiochip_add_data(&pc->gpio_chip, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return err;
+ goto out_remove;
}
return 0;
+
+out_remove:
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
+ return err;
}
static struct platform_driver bcm2835_pinctrl_driver = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index abffda1fd51e..1d5818269076 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1471,8 +1471,9 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
offset = cctx->intr_lines[intr_line];
if (offset == CHV_INVALID_HWIRQ) {
- dev_err(dev, "interrupt on unused interrupt line %u\n", intr_line);
- continue;
+ dev_warn_once(dev, "interrupt on unmapped interrupt line %u\n", intr_line);
+ /* Some boards expect hwirq 0 to trigger in this case */
+ offset = 0;
}
generic_handle_domain_irq(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85750974d182..826d494f3cc6 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -451,8 +451,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value &= ~PADCFG0_PMODE_MASK;
value |= PADCFG0_PMODE_GPIO;
- /* Disable input and output buffers */
- value |= PADCFG0_GPIORXDIS;
+ /* Disable TX buffer and enable RX (this will be input) */
+ value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
/* Disable SCI/SMI/NMI generation */
@@ -497,9 +497,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
intel_gpio_set_gpio_mode(padcfg0);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(padcfg0, true);
-
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -1115,9 +1112,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
intel_gpio_set_gpio_mode(reg);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(reg, true);
-
value = readl(reg);
value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
@@ -1216,6 +1210,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return IRQ_RETVAL(ret);
}
+static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *community;
+ void __iomem *base;
+ unsigned int gpp;
+
+ community = &pctrl->communities[i];
+ base = community->regs;
+
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ /* Mask and clear all interrupts */
+ writel(0, base + community->ie_offset + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
+ }
+ }
+}
+
+static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ /*
+ * Make sure the interrupt lines are in a proper state before
+ * further configuration.
+ */
+ intel_gpio_irq_init(pctrl);
+
+ return 0;
+}
+
static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
@@ -1320,6 +1347,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
girq->num_parents = 0;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
+ girq->init_hw = intel_gpio_irq_init_hw;
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
@@ -1695,26 +1723,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
}
EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
-static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
-{
- size_t i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *community;
- void __iomem *base;
- unsigned int gpp;
-
- community = &pctrl->communities[i];
- base = community->regs;
-
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- /* Mask and clear all interrupts */
- writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + community->is_offset + gpp * 4);
- }
- }
-}
-
static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 0bcd19597e4a..3ddaeffc0415 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ "INT34C6", (kernel_ulong_t)&tglh_soc_data },
{ "INTC1055", (kernel_ulong_t)&tgllp_soc_data },
- { "INTC1057", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 4d81908d6725..ba536fd4d674 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -78,7 +78,6 @@ struct npcm7xx_gpio {
struct gpio_chip gc;
int irqbase;
int irq;
- void *priv;
struct irq_chip irq_chip;
u32 pinctrl_id;
int (*direction_input)(struct gpio_chip *chip, unsigned offset);
@@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
- dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
+ dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts,
en);
sts &= en;
@@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = BIT(d->hwirq);
- dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
+ dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
d->irq, type);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- dev_dbg(d->chip->parent_device, "edge.rising\n");
+ dev_dbg(bank->gc.parent, "edge.rising\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_FALLING:
- dev_dbg(d->chip->parent_device, "edge.falling\n");
+ dev_dbg(bank->gc.parent, "edge.falling\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
- dev_dbg(d->chip->parent_device, "edge.both\n");
+ dev_dbg(bank->gc.parent, "edge.both\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
- dev_dbg(d->chip->parent_device, "level.low\n");
+ dev_dbg(bank->gc.parent, "level.low\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_LEVEL_HIGH:
- dev_dbg(d->chip->parent_device, "level.high\n");
+ dev_dbg(bank->gc.parent, "level.high\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
default:
- dev_dbg(d->chip->parent_device, "invalid irq type\n");
+ dev_dbg(bank->gc.parent, "invalid irq type\n");
return -EINVAL;
}
@@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d)
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = d->hwirq;
- dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
}
@@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* Clear events */
- dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
}
@@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* Enable events */
- dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
}
@@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* active-high, input, clear interrupt, enable interrupt */
- dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
+ dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq);
npcmgpio_direction_input(gc, gpio);
npcmgpio_irq_ack(d);
npcmgpio_irq_unmask(d);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 49e32684dbb2..ecab6bf63dc6 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
{
int i;
- for (i = K210_PC_DRIVE_MAX; i; i--) {
+ for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
if (k210_pinconf_drive_strength[i] <= max_strength_ua)
return i;
}
@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
if (!arg)
return -EINVAL;
- val |= K210_PC_PD;
+ val |= K210_PC_PU;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg *= 1000;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 8e081c90bdb2..639f1130e989 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -137,7 +137,8 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit)
static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off)
{
- return priv->properties->regoff[rno] + off;
+ return (priv->properties->regoff[rno] + off) *
+ regmap_get_reg_stride(priv->regs);
}
static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 0b912152a405..ab4b2ee9f217 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static struct irq_chip starfive_irq_chip = {
+ .name = "StarFive GPIO",
.irq_ack = starfive_irq_ack,
.irq_mask = starfive_irq_mask,
.irq_mask_ack = starfive_irq_mask_ack,
@@ -1307,9 +1308,6 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.base = -1;
sfp->gc.ngpio = NR_GPIOS;
- starfive_irq_chip.parent_device = dev;
- starfive_irq_chip.name = sfp->gc.label;
-
sfp->gc.irq.chip = &starfive_irq_chip;
sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
sfp->gc.irq.num_parents = 1;
@@ -1330,6 +1328,8 @@ static int starfive_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not register gpiochip\n");
+ irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
+
out_pinctrl_enable:
return pinctrl_enable(sfp->pctl);
}
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
index b5b47f4dd774..79d44bca039e 100644
--- a/drivers/pinctrl/pinctrl-thunderbay.c
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -773,63 +773,42 @@ static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc)
static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs)
{
- struct function_desc *function = funcs;
int i;
/* Assign the groups for each function */
- for (i = 0; i < tpc->soc->npins; i++) {
- const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i;
- struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
-
- while (pin_mux->name) {
- const char **grp;
- int j, grp_num, match = 0;
- size_t grp_size;
- struct function_desc *func;
-
- for (j = 0; j < tpc->nfuncs; j++) {
- if (!strcmp(pin_mux->name, function[j].name)) {
- match = 1;
- break;
- }
- }
-
- if (!match)
- return -EINVAL;
-
- func = function + j;
- grp_num = func->num_group_names;
- grp_size = sizeof(*func->group_names);
-
- if (!func->group_names) {
- func->group_names = devm_kcalloc(tpc->dev,
- grp_num,
- grp_size,
- GFP_KERNEL);
- if (!func->group_names) {
- kfree(func);
- return -ENOMEM;
- }
+ for (i = 0; i < tpc->nfuncs; i++) {
+ struct function_desc *func = &funcs[i];
+ const char **group_names;
+ unsigned int grp_idx = 0;
+ int j;
+
+ group_names = devm_kcalloc(tpc->dev, func->num_group_names,
+ sizeof(*group_names), GFP_KERNEL);
+ if (!group_names)
+ return -ENOMEM;
+
+ for (j = 0; j < tpc->soc->npins; j++) {
+ const struct pinctrl_pin_desc *pin_info = &thunderbay_pins[j];
+ struct thunderbay_mux_desc *pin_mux;
+
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ if (!strcmp(pin_mux->name, func->name))
+ group_names[grp_idx++] = pin_info->name;
}
-
- grp = func->group_names;
- while (*grp)
- grp++;
-
- *grp = pin_info->name;
- pin_mux++;
}
+
+ func->group_names = group_names;
}
/* Add all functions */
for (i = 0; i < tpc->nfuncs; i++) {
pinmux_generic_add_function(tpc->pctrl,
- function[i].name,
- function[i].group_names,
- function[i].num_group_names,
- function[i].data);
+ funcs[i].name,
+ funcs[i].group_names,
+ funcs[i].num_group_names,
+ funcs[i].data);
}
- kfree(function);
+ kfree(funcs);
return 0;
}
@@ -839,27 +818,30 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
void *ptr;
int pin;
- /* Total number of functions is unknown at this point. Allocate first. */
+ /*
+	 * Allocate the maximum possible number of functions. Assume every
+	 * pin is part of 8 (the hw maximum) globally unique muxes.
+ */
tpc->nfuncs = 0;
thunderbay_funcs = kcalloc(tpc->soc->npins * 8,
sizeof(*thunderbay_funcs), GFP_KERNEL);
if (!thunderbay_funcs)
return -ENOMEM;
- /* Find total number of functions and each's properties */
+ /* Setup 1 function for each unique mux */
for (pin = 0; pin < tpc->soc->npins; pin++) {
const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin;
- struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
+ struct thunderbay_mux_desc *pin_mux;
- while (pin_mux->name) {
- struct function_desc *func = thunderbay_funcs;
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ struct function_desc *func;
- while (func->name) {
+ /* Check if we already have function for this mux */
+ for (func = thunderbay_funcs; func->name; func++) {
if (!strcmp(pin_mux->name, func->name)) {
func->num_group_names++;
break;
}
- func++;
}
if (!func->name) {
@@ -868,8 +850,6 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
func->data = (int *)&pin_mux->mode;
tpc->nfuncs++;
}
-
- pin_mux++;
}
}
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index 42da6bd399ee..e14012209992 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -809,7 +809,6 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
unsigned int *npins)
{
struct pinctrl_pin_desc *pins, *pin;
- char **pin_names;
int ret;
int i;
@@ -821,14 +820,13 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
if (!pins)
return -ENOMEM;
- pin_names = devm_kasprintf_strarray(dev, ZYNQMP_PIN_PREFIX, *npins);
- if (IS_ERR(pin_names))
- return PTR_ERR(pin_names);
-
for (i = 0; i < *npins; i++) {
pin = &pins[i];
pin->number = i;
- pin->name = pin_names[i];
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ if (!pin->name)
+ return -ENOMEM;
}
*zynqmp_pins = pins;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index ce1917e230f4..152b71226a80 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -363,16 +363,16 @@ static const struct sunxi_desc_pin h616_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */
+ SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DO0 */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */
+ SUNXI_FUNCTION(0x5, "i2s3_din1"), /* DI1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */
+ SUNXI_FUNCTION(0x3, "i2s3_din0"), /* DI0 */
SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */
+ SUNXI_FUNCTION(0x5, "i2s3_dout1"), /* DO1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 80d6750c74a6..1f401377ff60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -36,6 +36,13 @@
#include "../core.h"
#include "pinctrl-sunxi.h"
+/*
+ * These lock classes tell lockdep that GPIO IRQs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key sunxi_pinctrl_irq_lock_class;
+static struct lock_class_key sunxi_pinctrl_irq_request_class;
+
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
@@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, true);
}
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
sunxi_pinctrl_gpio_set(chip, offset, value);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, false);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
int irqno = irq_create_mapping(pctl->domain, i);
+ irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class,
+ &sunxi_pinctrl_irq_request_class);
irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
handle_edge_irq);
irq_set_chip_data(irqno, pctl);
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fc5aa1525d13..d49a4efe46c8 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -302,13 +302,11 @@ EXPORT_SYMBOL(cros_ec_register);
*
* Return: 0 on success or negative error code.
*/
-int cros_ec_unregister(struct cros_ec_device *ec_dev)
+void cros_ec_unregister(struct cros_ec_device *ec_dev)
{
if (ec_dev->pd)
platform_device_unregister(ec_dev->pd);
platform_device_unregister(ec_dev->ec);
-
- return 0;
}
EXPORT_SYMBOL(cros_ec_unregister);
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
index 78363dcfdf23..bbca0096868a 100644
--- a/drivers/platform/chrome/cros_ec.h
+++ b/drivers/platform/chrome/cros_ec.h
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
int cros_ec_register(struct cros_ec_device *ec_dev);
-int cros_ec_unregister(struct cros_ec_device *ec_dev);
+void cros_ec_unregister(struct cros_ec_device *ec_dev);
int cros_ec_suspend(struct cros_ec_device *ec_dev);
int cros_ec_resume(struct cros_ec_device *ec_dev);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 30c8938c27d5..22feb0fd4ce7 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -313,7 +313,9 @@ static int cros_ec_i2c_remove(struct i2c_client *client)
{
struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index d6306d2a096f..7651417b4a25 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -439,7 +439,9 @@ static int cros_ec_lpc_remove(struct platform_device *pdev)
acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
cros_ec_lpc_acpi_notify);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
+
+ return 0;
}
static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 14c4046fa04d..8493af0f680e 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -786,11 +786,11 @@ static int cros_ec_spi_probe(struct spi_device *spi)
return 0;
}
-static int cros_ec_spi_remove(struct spi_device *spi)
+static void cros_ec_spi_remove(struct spi_device *spi)
{
struct cros_ec_device *ec_dev = spi_get_drvdata(spi);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 0d46706afd2d..4823bd2819f6 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -648,7 +648,7 @@ static struct olpc_ec_driver olpc_xo175_ec_driver = {
.ec_cmd = olpc_xo175_ec_cmd,
};
-static int olpc_xo175_ec_remove(struct spi_device *spi)
+static void olpc_xo175_ec_remove(struct spi_device *spi)
{
if (pm_power_off == olpc_xo175_ec_power_off)
pm_power_off = NULL;
@@ -657,8 +657,6 @@ static int olpc_xo175_ec_remove(struct spi_device *spi)
platform_device_unregister(olpc_ec);
olpc_ec = NULL;
-
- return 0;
}
static int olpc_xo175_ec_probe(struct spi_device *spi)
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 5f0578e25f71..463f1ec5c14e 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ARM64 || X86 || COMPILE_TEST
default y
help
Say Y here to get to see options for platform-specific device drivers
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index abac3eec565e..444ec81ba02d 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
}
bix->last_full_charg_capacity = ret;
- /* get serial number */
+ /*
+	 * Get the serial number. On some devices (with an unofficial
+	 * replacement battery?) reading any of the serial number range
+	 * addresses gets nacked; in that case just leave the serial number
+	 * empty.
+ */
ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
sizeof(buf), buf);
- if (ret != sizeof(buf)) {
+ if (ret == -EREMOTEIO) {
+ /* no serial number available */
+ } else if (ret != sizeof(buf)) {
dev_err(&client->dev, "Error reading serial no: %d\n", ret);
return ret;
+ } else {
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
}
- snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
/* get cycle count */
ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index f794343d6aaa..b1103f85a85a 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
@@ -85,6 +86,9 @@
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
+/* QoS request for letting CPUs enter idle states, but not the deepest one */
+#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3
+
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
@@ -124,12 +128,14 @@ struct amd_pmc_dev {
u32 cpu_id;
u32 active_ips;
/* SMU version information */
- u16 major;
- u16 minor;
- u16 rev;
+ u8 smu_program;
+ u8 major;
+ u8 minor;
+ u8 rev;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
+ struct pm_qos_request amd_pmc_pm_qos_req;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
@@ -180,11 +186,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
if (rc)
return rc;
- dev->major = (val >> 16) & GENMASK(15, 0);
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
dev->minor = (val >> 8) & GENMASK(7, 0);
dev->rev = (val >> 0) & GENMASK(7, 0);
- dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev);
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
return 0;
}
@@ -226,7 +234,7 @@ static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
return 0;
}
-const struct file_operations amd_pmc_stb_debugfs_fops = {
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
.owner = THIS_MODULE,
.open = amd_pmc_stb_debugfs_open,
.read = amd_pmc_stb_debugfs_read,
@@ -518,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
rc = rtc_alarm_irq_enable(rtc_device, 0);
dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+ /*
+	 * Prevent CPUs from getting into deep idle states while sending OS_HINT,
+	 * which is otherwise generally safe to send when at least one of the
+	 * CPUs is not in a deep idle state.
+ */
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY);
+ wake_up_all_idle_cpus();
+
return rc;
}
@@ -535,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
/* Activate CZN specific RTC functionality */
if (pdev->cpu_id == AMD_CPU_ID_CZN) {
rc = amd_pmc_verify_czn_rtc(pdev, &arg);
- if (rc < 0)
- return rc;
+ if (rc)
+ goto fail;
}
/* Dump the IdleMask before we send hint to SMU */
amd_pmc_idlemask_read(pdev, dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
- if (rc)
+ if (rc) {
dev_err(pdev->dev, "suspend failed\n");
+ goto fail;
+ }
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
- if (rc) {
+ if (rc) {
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
+ goto fail;
}
+ return 0;
+fail:
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
return rc;
}
@@ -576,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
- if (rc) {
+ if (rc)
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
- }
- return 0;
+ /* Restore the QoS request back to defaults if it was set */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+
+ return rc;
}
static const struct dev_pm_ops amd_pmc_pm_ops = {
@@ -719,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
+ cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
err_pci_dev_put:
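For reference, the cpu_latency_qos_*() calls added above follow a simple lifecycle: add the request once at probe time with no constraint, tighten it around the low-power transition, and relax it again on resume. Below is a minimal sketch of that pattern in a hypothetical driver; the foo_* names and the latency value are illustrative, not part of this patch.

#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

/* Illustrative latency bound in microseconds (assumption, not from this patch) */
#define FOO_MAX_IDLE_STATE_LATENCY	3

struct foo_dev {
	struct pm_qos_request qos_req;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* Register the request unconstrained; it costs nothing until updated */
	cpu_latency_qos_add_request(&foo->qos_req, PM_QOS_DEFAULT_VALUE);
	platform_set_drvdata(pdev, foo);

	return 0;
}

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* Keep CPUs out of the deepest idle states during the transition */
	cpu_latency_qos_update_request(&foo->qos_req, FOO_MAX_IDLE_STATE_LATENCY);

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* Relax the constraint again once the platform is back up */
	cpu_latency_qos_update_request(&foo->qos_req, PM_QOS_DEFAULT_VALUE);

	return 0;
}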
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index d4ef8f362ee6..6fd0c9fea82d 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
return 0;
}
-struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
.parse = tf103c_dock_hid_parse,
.start = tf103c_dock_hid_start,
.stop = tf103c_dock_hid_stop,
@@ -921,7 +921,7 @@ static int __maybe_unused tf103c_dock_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
+static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
static const struct acpi_device_id tf103c_dock_acpi_match[] = {
{"NPCE69A"},
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a3b83b22a3b1..2104a2621e50 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV)
+ if (err == -ENODEV || err == -ENODATA)
return 0;
return err;
}
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
index 0374bc742513..e4299cfa2205 100644
--- a/drivers/platform/x86/intel/crystal_cove_charger.c
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#define CHGRIRQ_REG 0x0a
+#define MCHGRIRQ_REG 0x17
struct crystal_cove_charger_data {
struct mutex buslock; /* irq_bus_lock */
@@ -25,8 +26,8 @@ struct crystal_cove_charger_data {
struct irq_domain *irq_domain;
int irq;
int charger_irq;
- bool irq_enabled;
- bool irq_is_enabled;
+ u8 mask;
+ u8 new_mask;
};
static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
@@ -53,13 +54,9 @@ static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- if (charger->irq_is_enabled != charger->irq_enabled) {
- if (charger->irq_enabled)
- enable_irq(charger->irq);
- else
- disable_irq(charger->irq);
-
- charger->irq_is_enabled = charger->irq_enabled;
+ if (charger->mask != charger->new_mask) {
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask);
+ charger->mask = charger->new_mask;
}
mutex_unlock(&charger->buslock);
@@ -69,14 +66,14 @@ static void crystal_cove_charger_irq_unmask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = true;
+ charger->new_mask &= ~BIT(data->hwirq);
}
static void crystal_cove_charger_irq_mask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = false;
+ charger->new_mask |= BIT(data->hwirq);
}
static void crystal_cove_charger_rm_irq_domain(void *data)
@@ -130,10 +127,13 @@ static int crystal_cove_charger_probe(struct platform_device *pdev)
irq_set_nested_thread(charger->charger_irq, true);
irq_set_noprobe(charger->charger_irq);
+ /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */
+ charger->mask = charger->new_mask = BIT(0);
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask);
+
ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
crystal_cove_charger_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- KBUILD_MODNAME, charger);
+ IRQF_ONESHOT, KBUILD_MODNAME, charger);
if (ret)
return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
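The conversion above is an instance of the usual pattern for interrupt controllers reached over a sleeping bus: irq_mask()/irq_unmask() only update a cached mask under irq_bus_lock(), and irq_bus_sync_unlock() flushes the cache to the hardware when it changed. A condensed sketch of the three callbacks follows; the foo_* names and the register offset are illustrative, not taken from this driver.

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#define FOO_IRQ_MASK_REG	0x17	/* illustrative register offset */

struct foo_chip {
	struct mutex buslock;	/* serializes cached-mask updates */
	struct regmap *regmap;
	u8 mask;		/* mask currently programmed in hardware */
	u8 new_mask;		/* mask requested by the IRQ core */
};

static void foo_irq_bus_lock(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);

	mutex_lock(&foo->buslock);
}

static void foo_irq_bus_sync_unlock(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);

	/* Only touch the slow bus when the mask actually changed */
	if (foo->mask != foo->new_mask) {
		regmap_write(foo->regmap, FOO_IRQ_MASK_REG, foo->new_mask);
		foo->mask = foo->new_mask;
	}

	mutex_unlock(&foo->buslock);
}

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);

	/* Runs with the bus lock held; the register write is deferred */
	foo->new_mask |= BIT(data->hwirq);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);

	foo->new_mask &= ~BIT(data->hwirq);
}

static struct irq_chip foo_irq_chip = {
	.name			= "foo",
	.irq_mask		= foo_irq_mask,
	.irq_unmask		= foo_irq_unmask,
	.irq_bus_lock		= foo_irq_bus_lock,
	.irq_bus_sync_unlock	= foo_irq_bus_sync_unlock,
};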
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index f93d437fd192..525f09a3b5ff 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
}
};
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index c9a85eb2e860..e8424e70d81d 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-static DEFINE_MUTEX(punit_misc_dev_lock);
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;
@@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
int i, ret = 0;
/* Fail open, if a module is going away */
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
} else {
misc_device_open++;
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return ret;
}
@@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
{
int i;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
misc_device_open--;
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
if (cb->registered)
module_put(cb->owner);
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return 0;
}
@@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
+static int isst_misc_reg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ if (!misc_usage_count) {
+ misc_device_ret = isst_if_cpu_info_init();
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret) {
+ isst_if_cpu_info_exit();
+ goto unlock_exit;
+ }
+ }
+ misc_usage_count++;
+
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_reg_lock);
+
+ return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_usage_count)
+ misc_usage_count--;
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
/**
* isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
@@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = {
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- if (misc_device_ret)
- return misc_device_ret;
+ int ret;
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
+ /* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return -EAGAIN;
}
- if (!misc_usage_count) {
- int ret;
-
- misc_device_ret = misc_register(&isst_if_char_driver);
- if (misc_device_ret)
- goto unlock_exit;
-
- ret = isst_if_cpu_info_init();
- if (ret) {
- misc_deregister(&isst_if_char_driver);
- misc_device_ret = ret;
- goto unlock_exit;
- }
- }
memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
punit_callbacks[device_type].registered = 1;
- misc_usage_count++;
-unlock_exit:
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
- return misc_device_ret;
+ ret = isst_misc_reg();
+ if (ret) {
+ /*
+		 * No need for a mutex: the misc device registration failed,
+		 * so nobody can open the device yet. Hence no contention.
+ */
+ punit_callbacks[device_type].registered = 0;
+ return ret;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- mutex_lock(&punit_misc_dev_lock);
- misc_usage_count--;
+ isst_misc_unreg();
+ mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].registered = 0;
if (device_type == ISST_IF_DEV_MBOX)
isst_delete_hash();
- if (!misc_usage_count && !misc_device_ret) {
- misc_deregister(&isst_if_char_driver);
- isst_if_cpu_info_exit();
- }
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 098180fb1cfc..3424b080db77 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8679,9 +8679,10 @@ static const struct attribute_group fan_driver_attr_group = {
.attrs = fan_driver_attributes,
};
-#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */
-#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
-#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8702,6 +8703,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
	TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (4th gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
+ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8730,6 +8733,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
quirks = tpacpi_check_quirks(fan_quirk_table,
ARRAY_SIZE(fan_quirk_table));
+ if (quirks & TPACPI_FAN_NOFAN) {
+ pr_info("No integrated ThinkPad fan available\n");
+ return -ENODEV;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -10112,6 +10120,9 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
+#define DYTC_FC_MMC 27 /* MMC Mode supported */
+
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10324,6 +10335,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (dytc_version < 5)
return -ENODEV;
+ /* Check what capabilities are supported. Currently MMC is needed */
+ err = dytc_command(DYTC_CMD_FUNC_CAP, &output);
+ if (err)
+ return err;
+ if (!(output & BIT(DYTC_FC_MMC))) {
+ dbg_printk(TPACPI_DBG_INIT, " DYTC MMC mode not supported\n");
+ return -ENODEV;
+ }
+
dbg_printk(TPACPI_DBG_INIT,
"DYTC version %d: thermal mode available\n", dytc_version);
/*
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 494f23052678..bc97bfa8e8a6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -770,6 +770,21 @@ static const struct ts_dmi_data predia_basic_data = {
.properties = predia_basic_props,
};
+static const struct property_entry rwc_nanote_p8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rwc_nanote_p8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rwc_nanote_p8_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1395,6 +1410,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* RWC NANOTE P8 */
+ .driver_data = (void *)&rwc_nanote_p8_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+ },
+ },
+ {
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 3ba63ad91b28..9360a8a92486 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -26,6 +26,7 @@
#include <linux/string.h>
/* For gpio_get_desc() which is EXPORT_SYMBOL_GPL() */
#include "../../gpio/gpiolib.h"
+#include "../../gpio/gpiolib-acpi.h"
/*
* Helper code to get Linux IRQ numbers given a description of the IRQ source
@@ -47,7 +48,7 @@ struct x86_acpi_irq_data {
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
};
-static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data)
+static int gpiochip_find_match_label(struct gpio_chip *gc, void *data)
{
return gc->label && !strcmp(gc->label, data);
}
@@ -73,7 +74,7 @@ static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
return irq;
case X86_ACPI_IRQ_TYPE_GPIOINT:
/* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */
- chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find);
+ chip = gpiochip_find(data->chip, gpiochip_find_match_label);
if (!chip) {
pr_err("error cannot find GPIO chip %s\n", data->chip);
return -ENODEV;
@@ -143,14 +144,17 @@ struct x86_serdev_info {
};
struct x86_dev_info {
+ char *invalid_aei_gpiochip;
const char * const *modules;
- struct gpiod_lookup_table **gpiod_lookup_tables;
+ struct gpiod_lookup_table * const *gpiod_lookup_tables;
const struct x86_i2c_client_info *i2c_client_info;
const struct platform_device_info *pdev_info;
const struct x86_serdev_info *serdev_info;
int i2c_client_count;
int pdev_count;
int serdev_count;
+ int (*init)(void);
+ void (*exit)(void);
};
/* Generic / shared bq24190 settings */
@@ -187,8 +191,8 @@ static struct bq24190_platform_data bq24190_pdata = {
};
static const char * const bq24190_modules[] __initconst = {
- "crystal_cove_charger", /* For the bq24190 IRQ */
- "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ "intel_crystal_cove_charger", /* For the bq24190 IRQ */
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
NULL
};
@@ -302,7 +306,7 @@ static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *asus_me176c_gpios[] = {
+static struct gpiod_lookup_table * const asus_me176c_gpios[] = {
&int3496_gpo2_pin22_gpios,
&asus_me176c_goodix_gpios,
NULL
@@ -317,6 +321,7 @@ static const struct x86_dev_info asus_me176c_info __initconst = {
.serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
.gpiod_lookup_tables = asus_me176c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/* Asus TF103C tablets have an Android factory img with everything hardcoded */
@@ -405,7 +410,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table *asus_tf103c_gpios[] = {
+static struct gpiod_lookup_table * const asus_tf103c_gpios[] = {
&int3496_gpo2_pin22_gpios,
NULL
};
@@ -417,6 +422,7 @@ static const struct x86_dev_info asus_tf103c_info __initconst = {
.pdev_count = ARRAY_SIZE(int3496_pdevs),
.gpiod_lookup_tables = asus_tf103c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/*
@@ -490,6 +496,39 @@ static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
};
+#define CZC_EC_EXTRA_PORT 0x68
+#define CZC_EC_ANDROID_KEYS 0x63
+
+static int __init czc_p10t_init(void)
+{
+ /*
+	 * The device boots up in "Windows 7" mode, in which the home button
+	 * sends a Windows-specific key sequence (Left Meta + D) and the second
+	 * button sends an unknown one while also toggling the Radio Kill Switch.
+	 * This is a surprising behavior when the second button is labeled "Back".
+	 *
+	 * The vendor-supplied Android-x86 build switches the device to an
+	 * "Android" mode by writing value 0x63 to the I/O port 0x68. This seems
+	 * to just set bit 6 on address 0x96 in the EC region; switching the bit
+	 * directly seems to achieve the same result. It uses a "p10t_switcher"
+	 * to do the job. It doesn't seem to be able to do anything else, and no
+	 * other use of the port 0x68 is known.
+	 *
+	 * In the Android mode, the home button sends just a single scancode,
+	 * which can be handled in Linux userspace more reasonably, and the back
+	 * button only sends a scancode without toggling the kill switch.
+	 * The scancode can then be mapped either to Back or RF Kill functionality
+	 * in userspace, depending on how the button is labeled on that particular
+	 * model.
+ */
+ outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT);
+ return 0;
+}
+
+static const struct x86_dev_info czc_p10t __initconst = {
+ .init = czc_p10t_init,
+};
+
/*
* Whitelabel (sold as various brands) TM800A550L tablets.
* These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices
@@ -559,7 +598,7 @@ static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = {
+static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = {
&whitelabel_tm800a550l_goodix_gpios,
NULL
};
@@ -642,6 +681,24 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&chuwi_hi8_info,
},
{
+ /* CZC P10T */
+ .ident = "CZC ODEON TPC-10 (\"P10T\")",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CZC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
+ /* A variant of CZC P10T */
+ .ident = "ViewSonic ViewPad 10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -669,7 +726,8 @@ static int serdev_count;
static struct i2c_client **i2c_clients;
static struct platform_device **pdevs;
static struct serdev_device **serdevs;
-static struct gpiod_lookup_table **gpiod_lookup_tables;
+static struct gpiod_lookup_table * const *gpiod_lookup_tables;
+static void (*exit_handler)(void);
static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info,
int idx)
@@ -787,6 +845,9 @@ static void x86_android_tablet_cleanup(void)
kfree(i2c_clients);
+ if (exit_handler)
+ exit_handler();
+
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
}
@@ -795,6 +856,7 @@ static __init int x86_android_tablet_init(void)
{
const struct x86_dev_info *dev_info;
const struct dmi_system_id *id;
+ struct gpio_chip *chip;
int i, ret = 0;
id = dmi_first_match(x86_android_tablet_ids);
@@ -804,6 +866,20 @@ static __init int x86_android_tablet_init(void)
dev_info = id->driver_data;
/*
+ * The broken DSDTs on these devices often also include broken
+	 * _AEI (ACPI Event Interrupt) handlers; disable these.
+ */
+ if (dev_info->invalid_aei_gpiochip) {
+ chip = gpiochip_find(dev_info->invalid_aei_gpiochip,
+ gpiochip_find_match_label);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip);
+ return -ENODEV;
+ }
+ acpi_gpiochip_free_interrupts(chip);
+ }
+
+ /*
* Since this runs from module_init() it cannot use -EPROBE_DEFER,
* instead pre-load any modules which are listed as requirements.
*/
@@ -814,6 +890,15 @@ static __init int x86_android_tablet_init(void)
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+ if (dev_info->init) {
+ ret = dev_info->init();
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ exit_handler = dev_info->exit;
+ }
+
i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
if (!i2c_clients) {
x86_android_tablet_cleanup();
@@ -865,6 +950,6 @@ static __init int x86_android_tablet_init(void)
module_init(x86_android_tablet_init);
module_exit(x86_android_tablet_cleanup);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
MODULE_LICENSE("GPL");
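The new x86_dev_info members introduced above (init, exit, invalid_aei_gpiochip) give a board entry a place for one-off fixups that do not map to instantiated devices. A hedged sketch of a hypothetical board descriptor wiring them up; the callbacks, the chip label and the board itself are made up for illustration only.

static int __init example_board_init(void)
{
	/* One-off EC/firmware poke needed before the devices are instantiated */
	return 0;
}

static void example_board_exit(void)
{
	/* Undo whatever example_board_init() did; called from cleanup */
}

static const struct x86_dev_info example_board_info __initconst = {
	.init			= example_board_init,
	.exit			= example_board_exit,
	/* Skip the broken _AEI handlers on this (made-up) GPIO controller */
	.invalid_aei_gpiochip	= "INT33FC:00",
};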
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index cc6757dfa3f1..c02e7bf643a6 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -171,7 +171,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
error = pnp_drv->driver.pm->suspend(dev);
- suspend_report_result(pnp_drv->driver.pm->suspend, error);
+ suspend_report_result(dev, pnp_drv->driver.pm->suspend, error);
if (error)
return error;
}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index afaf30a3622c..38928ff7472b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -287,9 +287,9 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
u32 lvl, void *context,
void **rv)
{
- struct acpi_device *device;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- if (acpi_bus_get_device(handle, &device))
+ if (!device)
return AE_CTRL_DEPTH;
if (acpi_is_pnp_device(device))
pnpacpi_add_device(device);
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index b274942dc46a..01ad84fd147c 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
BQ256XX_WDT_BIT_SHIFT);
ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret == -ENOMEM)
+ return ret;
+
if (ret) {
dev_warn(bq->dev, "battery info missing, default values will be applied\n");
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 0c87ad0dbf71..728e2a6cc9c3 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (ret) {
/* Allocate an empty battery */
cw_bat->battery = devm_kzalloc(&client->dev,
- sizeof(cw_bat->battery),
+ sizeof(*cw_bat->battery),
GFP_KERNEL);
if (!cw_bat->battery)
return -ENOMEM;
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 8242e8c5ed77..515e3ceb3393 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -46,6 +46,7 @@ config IDLE_INJECT
config DTPM
bool "Power capping for Dynamic Thermal Power Management (EXPERIMENTAL)"
+ depends on OF
help
This enables support for the power capping for the dynamic
thermal power management userspace engine.
@@ -56,4 +57,11 @@ config DTPM_CPU
help
This enables support for CPU power limitation based on
energy model.
+
+config DTPM_DEVFREQ
+ bool "Add device power capping based on the energy model"
+ depends on DTPM && ENERGY_MODEL
+ help
+ This enables support for device power limitation based on
+ energy model.
endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index fabcf388a8d3..494617cdad88 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DTPM) += dtpm.o
obj-$(CONFIG_DTPM_CPU) += dtpm_cpu.o
+obj-$(CONFIG_DTPM_DEVFREQ) += dtpm_devfreq.o
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index 8cb45f2d3d78..ce920f17f45f 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -23,6 +23,9 @@
#include <linux/powercap.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "dtpm_subsys.h"
#define DTPM_POWER_LIMIT_FLAG 0
@@ -48,9 +51,7 @@ static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
{
struct dtpm *dtpm = to_dtpm(pcz);
- mutex_lock(&dtpm_lock);
*max_power_uw = dtpm->power_max - dtpm->power_min;
- mutex_unlock(&dtpm_lock);
return 0;
}
@@ -80,14 +81,7 @@ static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw)
static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
{
- struct dtpm *dtpm = to_dtpm(pcz);
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __get_power_uw(dtpm, power_uw);
- mutex_unlock(&dtpm_lock);
-
- return ret;
+ return __get_power_uw(to_dtpm(pcz), power_uw);
}
static void __dtpm_rebalance_weight(struct dtpm *dtpm)
@@ -130,7 +124,16 @@ static void __dtpm_add_power(struct dtpm *dtpm)
}
}
-static int __dtpm_update_power(struct dtpm *dtpm)
+/**
+ * dtpm_update_power - Update the power on the dtpm
+ * @dtpm: a pointer to a dtpm structure to update
+ *
+ * Function to update the power values of the given dtpm node. These
+ * new values will be propagated to the tree.
+ *
+ * Return: zero on success, -EINVAL if the values are inconsistent
+ */
+int dtpm_update_power(struct dtpm *dtpm)
{
int ret;
@@ -153,26 +156,6 @@ static int __dtpm_update_power(struct dtpm *dtpm)
}
/**
- * dtpm_update_power - Update the power on the dtpm
- * @dtpm: a pointer to a dtpm structure to update
- *
- * Function to update the power values of the dtpm node specified in
- * parameter. These new values will be propagated to the tree.
- *
- * Return: zero on success, -EINVAL if the values are inconsistent
- */
-int dtpm_update_power(struct dtpm *dtpm)
-{
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __dtpm_update_power(dtpm);
- mutex_unlock(&dtpm_lock);
-
- return ret;
-}
-
-/**
* dtpm_release_zone - Cleanup when the node is released
* @pcz: a pointer to a powercap_zone structure
*
@@ -188,48 +171,28 @@ int dtpm_release_zone(struct powercap_zone *pcz)
struct dtpm *dtpm = to_dtpm(pcz);
struct dtpm *parent = dtpm->parent;
- mutex_lock(&dtpm_lock);
-
- if (!list_empty(&dtpm->children)) {
- mutex_unlock(&dtpm_lock);
+ if (!list_empty(&dtpm->children))
return -EBUSY;
- }
if (parent)
list_del(&dtpm->sibling);
__dtpm_sub_power(dtpm);
- mutex_unlock(&dtpm_lock);
-
if (dtpm->ops)
dtpm->ops->release(dtpm);
+ else
+ kfree(dtpm);
- if (root == dtpm)
- root = NULL;
-
- kfree(dtpm);
-
- return 0;
-}
-
-static int __get_power_limit_uw(struct dtpm *dtpm, int cid, u64 *power_limit)
-{
- *power_limit = dtpm->power_limit;
return 0;
}
static int get_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 *power_limit)
{
- struct dtpm *dtpm = to_dtpm(pcz);
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __get_power_limit_uw(dtpm, cid, power_limit);
- mutex_unlock(&dtpm_lock);
-
- return ret;
+ *power_limit = to_dtpm(pcz)->power_limit;
+
+ return 0;
}
/*
@@ -289,7 +252,7 @@ static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit)
ret = __set_power_limit_uw(child, cid, power);
if (!ret)
- ret = __get_power_limit_uw(child, cid, &power);
+ ret = get_power_limit_uw(&child->zone, cid, &power);
if (ret)
break;
@@ -307,8 +270,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz,
struct dtpm *dtpm = to_dtpm(pcz);
int ret;
- mutex_lock(&dtpm_lock);
-
/*
* Don't allow values outside of the power range previously
* set when initializing the power numbers.
@@ -320,8 +281,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz,
pr_debug("%s: power limit: %llu uW, power max: %llu uW\n",
dtpm->zone.name, dtpm->power_limit, dtpm->power_max);
- mutex_unlock(&dtpm_lock);
-
return ret;
}
@@ -332,11 +291,7 @@ static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
{
- struct dtpm *dtpm = to_dtpm(pcz);
-
- mutex_lock(&dtpm_lock);
- *max_power = dtpm->power_max;
- mutex_unlock(&dtpm_lock);
+ *max_power = to_dtpm(pcz)->power_max;
return 0;
}
@@ -439,8 +394,6 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
if (IS_ERR(pcz))
return PTR_ERR(pcz);
- mutex_lock(&dtpm_lock);
-
if (parent) {
list_add_tail(&dtpm->sibling, &parent->children);
dtpm->parent = parent;
@@ -456,19 +409,253 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
pr_debug("Registered dtpm node '%s' / %llu-%llu uW, \n",
dtpm->zone.name, dtpm->power_min, dtpm->power_max);
- mutex_unlock(&dtpm_lock);
+ return 0;
+}
+
+static struct dtpm *dtpm_setup_virtual(const struct dtpm_node *hierarchy,
+ struct dtpm *parent)
+{
+ struct dtpm *dtpm;
+ int ret;
+
+ dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);
+ if (!dtpm)
+ return ERR_PTR(-ENOMEM);
+ dtpm_init(dtpm, NULL);
+
+ ret = dtpm_register(hierarchy->name, dtpm, parent);
+ if (ret) {
+ pr_err("Failed to register dtpm node '%s': %d\n",
+ hierarchy->name, ret);
+ kfree(dtpm);
+ return ERR_PTR(ret);
+ }
+
+ return dtpm;
+}
+
+static struct dtpm *dtpm_setup_dt(const struct dtpm_node *hierarchy,
+ struct dtpm *parent)
+{
+ struct device_node *np;
+ int i, ret;
+
+ np = of_find_node_by_path(hierarchy->name);
+ if (!np) {
+ pr_err("Failed to find '%s'\n", hierarchy->name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->setup)
+ continue;
+
+ ret = dtpm_subsys[i]->setup(parent, np);
+ if (ret) {
+ pr_err("Failed to setup '%s': %d\n", dtpm_subsys[i]->name, ret);
+ of_node_put(np);
+ return ERR_PTR(ret);
+ }
+ }
+
+ of_node_put(np);
+
+ /*
+	 * By returning a NULL pointer, we let the caller know there
+	 * is no child for us, as we are a leaf of the tree.
+ */
+ return NULL;
+}
+
+typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *);
+
+static dtpm_node_callback_t dtpm_node_callback[] = {
+ [DTPM_NODE_VIRTUAL] = dtpm_setup_virtual,
+ [DTPM_NODE_DT] = dtpm_setup_dt,
+};
+
+static int dtpm_for_each_child(const struct dtpm_node *hierarchy,
+ const struct dtpm_node *it, struct dtpm *parent)
+{
+ struct dtpm *dtpm;
+ int i, ret;
+
+ for (i = 0; hierarchy[i].name; i++) {
+
+ if (hierarchy[i].parent != it)
+ continue;
+
+ dtpm = dtpm_node_callback[hierarchy[i].type](&hierarchy[i], parent);
+
+ /*
+		 * A NULL pointer means there are no children, hence we
+		 * continue without recursing any deeper.
+ */
+ if (!dtpm)
+ continue;
+
+ /*
+		 * There are multiple reasons why the callback could
+		 * fail. The generic glue abstracts the backend, so it
+		 * is not possible to report back or take a decision
+		 * based on the error. In any case, a failure here is
+		 * not critical to the hierarchy creation: we can assume
+		 * the underlying service is not available, so we
+		 * continue without this branch in the tree, logging a
+		 * warning that the node was not created.
+ */
+ if (IS_ERR(dtpm)) {
+ pr_warn("Failed to create '%s' in the hierarchy\n",
+ hierarchy[i].name);
+ continue;
+ }
+
+ ret = dtpm_for_each_child(hierarchy, &hierarchy[i], dtpm);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static int __init init_dtpm(void)
+/**
+ * dtpm_create_hierarchy - Create the dtpm hierarchy
+ * @dtpm_match_table: A match table whose data field points to an array of
+ *                    struct dtpm_node describing the hierarchy
+ *
+ * The function is called by the platform-specific code with the
+ * description of the different nodes in the hierarchy. It creates the
+ * tree in the sysfs filesystem under the powercap dtpm entry.
+ *
+ * The expected tree has the format:
+ *
+ * struct dtpm_node hierarchy[] = {
+ *	[0] = { .name = "topmost", .type = DTPM_NODE_VIRTUAL },
+ *	[1] = { .name = "package", .type = DTPM_NODE_VIRTUAL, .parent = &hierarchy[0] },
+ *	[2] = { .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ *	[3] = { .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ *	[4] = { .name = "/cpus/cpu2", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ *	[5] = { .name = "/cpus/cpu3", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ *	[6] = { }
+ * };
+ *
+ * The last element is always an empty one and marks the end of the
+ * array.
+ *
+ * Return: zero on success, a negative value in case of error. Errors
+ * are reported back from the underlying functions.
+ */
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table)
{
+ const struct of_device_id *match;
+ const struct dtpm_node *hierarchy;
+ struct device_node *np;
+ int i, ret;
+
+ mutex_lock(&dtpm_lock);
+
+ if (pct) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
pr_err("Failed to register control type\n");
- return PTR_ERR(pct);
+ ret = PTR_ERR(pct);
+ goto out_pct;
}
+ ret = -ENODEV;
+ np = of_find_node_by_path("/");
+ if (!np)
+ goto out_err;
+
+ match = of_match_node(dtpm_match_table, np);
+
+ of_node_put(np);
+
+ if (!match)
+ goto out_err;
+
+ hierarchy = match->data;
+ if (!hierarchy) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ ret = dtpm_for_each_child(hierarchy, NULL, NULL);
+ if (ret)
+ goto out_err;
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->init)
+ continue;
+
+ ret = dtpm_subsys[i]->init();
+ if (ret)
+			pr_info("Failed to initialize '%s': %d\n",
+ dtpm_subsys[i]->name, ret);
+ }
+
+ mutex_unlock(&dtpm_lock);
+
return 0;
+
+out_err:
+ powercap_unregister_control_type(pct);
+out_pct:
+ pct = NULL;
+out_unlock:
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dtpm_create_hierarchy);
+
+static void __dtpm_destroy_hierarchy(struct dtpm *dtpm)
+{
+ struct dtpm *child, *aux;
+
+ list_for_each_entry_safe(child, aux, &dtpm->children, sibling)
+ __dtpm_destroy_hierarchy(child);
+
+ /*
+	 * At this point, we know all children have already been removed
+	 * by the recursive calls above.
+ */
+ dtpm_unregister(dtpm);
+}
+
+void dtpm_destroy_hierarchy(void)
+{
+ int i;
+
+ mutex_lock(&dtpm_lock);
+
+ if (!pct)
+ goto out_unlock;
+
+ __dtpm_destroy_hierarchy(root);
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->exit)
+ continue;
+
+ dtpm_subsys[i]->exit();
+ }
+
+ powercap_unregister_control_type(pct);
+
+ pct = NULL;
+
+ root = NULL;
+
+out_unlock:
+ mutex_unlock(&dtpm_lock);
}
-late_initcall(init_dtpm);
+EXPORT_SYMBOL_GPL(dtpm_destroy_hierarchy);
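As a usage illustration, a platform-specific module would describe its topology with a dtpm_node array (mirroring the kernel-doc example above) and pass a matching of_device_id table to dtpm_create_hierarchy(). This is a minimal sketch assuming struct dtpm_node and the DTPM_NODE_* constants are exposed through <linux/dtpm.h>; the compatible string and module name are made up.

#include <linux/dtpm.h>
#include <linux/module.h>
#include <linux/of.h>

static struct dtpm_node example_hierarchy[] = {
	[0] = { .name = "topmost", .type = DTPM_NODE_VIRTUAL },
	[1] = { .name = "package", .type = DTPM_NODE_VIRTUAL,
		.parent = &example_hierarchy[0] },
	[2] = { .name = "/cpus/cpu0", .type = DTPM_NODE_DT,
		.parent = &example_hierarchy[1] },
	[3] = { .name = "/cpus/cpu1", .type = DTPM_NODE_DT,
		.parent = &example_hierarchy[1] },
	[4] = { },
};

static struct of_device_id example_dtpm_match_table[] = {
	{ .compatible = "vendor,example-soc", .data = example_hierarchy },
	{},
};

static int __init example_dtpm_init(void)
{
	/* Builds the tree under the powercap "dtpm" control type in sysfs */
	return dtpm_create_hierarchy(example_dtpm_match_table);
}
module_init(example_dtpm_init);

static void __exit example_dtpm_exit(void)
{
	dtpm_destroy_hierarchy();
}
module_exit(example_dtpm_exit);

MODULE_LICENSE("GPL");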
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index b740866b228d..bca2f912d349 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -21,6 +21,7 @@
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
+#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
@@ -150,10 +151,17 @@ static int update_pd_power_uw(struct dtpm *dtpm)
static void pd_release(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
+ struct cpufreq_policy *policy;
if (freq_qos_request_active(&dtpm_cpu->qos_req))
freq_qos_remove_request(&dtpm_cpu->qos_req);
+ policy = cpufreq_cpu_get(dtpm_cpu->cpu);
+ if (policy) {
+ for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
+ }
+
kfree(dtpm_cpu);
}
@@ -178,11 +186,26 @@ static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
struct dtpm_cpu *dtpm_cpu;
+
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ return dtpm_update_power(&dtpm_cpu->dtpm);
+
+ return 0;
+}
+
+static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+{
+ struct dtpm_cpu *dtpm_cpu;
struct cpufreq_policy *policy;
struct em_perf_domain *pd;
char name[CPUFREQ_NAME_LEN];
int ret = -ENOMEM;
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ return 0;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -191,10 +214,6 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
if (!pd)
return -EINVAL;
- dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
- if (dtpm_cpu)
- return dtpm_update_power(&dtpm_cpu->dtpm);
-
dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
if (!dtpm_cpu)
return -ENOMEM;
@@ -207,7 +226,7 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);
- ret = dtpm_register(name, &dtpm_cpu->dtpm, NULL);
+ ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
if (ret)
goto out_kfree_dtpm_cpu;
@@ -231,7 +250,18 @@ out_kfree_dtpm_cpu:
return ret;
}
-static int __init dtpm_cpu_init(void)
+static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
+{
+ int cpu;
+
+ cpu = of_cpu_node_to_id(np);
+ if (cpu < 0)
+ return 0;
+
+ return __dtpm_cpu_setup(cpu, dtpm);
+}
+
+static int dtpm_cpu_init(void)
{
int ret;
@@ -269,4 +299,15 @@ static int __init dtpm_cpu_init(void)
return 0;
}
-DTPM_DECLARE(dtpm_cpu, dtpm_cpu_init);
+static void dtpm_cpu_exit(void)
+{
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+ cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
+}
+
+struct dtpm_subsys_ops dtpm_cpu_ops = {
+ .name = KBUILD_MODNAME,
+ .init = dtpm_cpu_init,
+ .exit = dtpm_cpu_exit,
+ .setup = dtpm_cpu_setup,
+};
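For context, the dtpm_subsys[] table iterated by dtpm.c is expected to collect one dtpm_subsys_ops descriptor per backend, such as the dtpm_cpu_ops defined above. The header providing that table is not part of this hunk; the following is a hedged sketch of what dtpm_subsys.h could plausibly contain.

/* SPDX-License-Identifier: GPL-2.0-only */
/* Sketch of the glue iterated by dtpm.c; not taken verbatim from this patch */
#ifndef ___DTPM_SUBSYS_H__
#define ___DTPM_SUBSYS_H__

#include <linux/dtpm.h>

extern struct dtpm_subsys_ops dtpm_cpu_ops;
extern struct dtpm_subsys_ops dtpm_devfreq_ops;

/* Only the backends selected in Kconfig end up in the table */
static struct dtpm_subsys_ops *dtpm_subsys[] = {
#ifdef CONFIG_DTPM_CPU
	&dtpm_cpu_ops,
#endif
#ifdef CONFIG_DTPM_DEVFREQ
	&dtpm_devfreq_ops,
#endif
};

#endif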
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
new file mode 100644
index 000000000000..91276761a31d
--- /dev/null
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The devfreq device combined with the energy model and the load can
+ * give an estimate of the power consumption as well as a way to limit
+ * the power.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpumask.h>
+#include <linux/devfreq.h>
+#include <linux/dtpm.h>
+#include <linux/energy_model.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+struct dtpm_devfreq {
+ struct dtpm dtpm;
+ struct dev_pm_qos_request qos_req;
+ struct devfreq *devfreq;
+};
+
+static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
+{
+ return container_of(dtpm, struct dtpm_devfreq, dtpm);
+}
+
+static int update_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+
+ dtpm->power_min = pd->table[0].power;
+ dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+
+ dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+ dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+
+ return 0;
+}
+
+static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ unsigned long freq;
+ u64 power;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ if (power > power_limit)
+ break;
+ }
+
+ freq = pd->table[i - 1].frequency;
+
+ dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+
+ power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
+
+ return power_limit;
+}
+
+static void _normalize_load(struct devfreq_dev_status *status)
+{
+ if (status->total_time > 0xfffff) {
+ status->total_time >>= 10;
+ status->busy_time >>= 10;
+ }
+
+ status->busy_time <<= 10;
+ status->busy_time /= status->total_time ? : 1;
+
+ status->busy_time = status->busy_time ? : 1;
+ status->total_time = 1024;
+}
+
+static u64 get_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ struct devfreq_dev_status status;
+ unsigned long freq;
+ u64 power;
+ int i;
+
+ mutex_lock(&devfreq->lock);
+ status = devfreq->last_status;
+ mutex_unlock(&devfreq->lock);
+
+ freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
+ _normalize_load(&status);
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ if (pd->table[i].frequency < freq)
+ continue;
+
+ power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ power *= status.busy_time;
+ power >>= 10;
+
+ return power;
+ }
+
+ return 0;
+}
+
+static void pd_release(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+
+ if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
+ dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);
+
+ kfree(dtpm_devfreq);
+}
+
+static struct dtpm_ops dtpm_ops = {
+ .set_power_uw = set_pd_power_limit,
+ .get_power_uw = get_pd_power_uw,
+ .update_power_uw = update_pd_power_uw,
+ .release = pd_release,
+};
+
+static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
+{
+ struct device *dev = devfreq->dev.parent;
+ struct dtpm_devfreq *dtpm_devfreq;
+ struct em_perf_domain *pd;
+ int ret = -ENOMEM;
+
+ pd = em_pd_get(dev);
+ if (!pd) {
+ ret = dev_pm_opp_of_register_em(dev, NULL);
+ if (ret) {
+ pr_err("No energy model available for '%s'\n", dev_name(dev));
+ return -EINVAL;
+ }
+ }
+
+ dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
+ if (!dtpm_devfreq)
+ return -ENOMEM;
+
+ dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);
+
+ dtpm_devfreq->devfreq = devfreq;
+
+ ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
+ if (ret) {
+ pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
+ kfree(dtpm_devfreq);
+ return ret;
+ }
+
+ ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ if (ret) {
+ pr_err("Failed to add QoS request: %d\n", ret);
+ goto out_dtpm_unregister;
+ }
+
+ dtpm_update_power(&dtpm_devfreq->dtpm);
+
+ return 0;
+
+out_dtpm_unregister:
+ dtpm_unregister(&dtpm_devfreq->dtpm);
+
+ return ret;
+}
+
+static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
+{
+ struct devfreq *devfreq;
+
+ devfreq = devfreq_get_devfreq_by_node(np);
+ if (IS_ERR(devfreq))
+ return 0;
+
+ return __dtpm_devfreq_setup(devfreq, dtpm);
+}
+
+struct dtpm_subsys_ops dtpm_devfreq_ops = {
+ .name = KBUILD_MODNAME,
+ .setup = dtpm_devfreq_setup,
+};
diff --git a/drivers/powercap/dtpm_subsys.h b/drivers/powercap/dtpm_subsys.h
new file mode 100644
index 000000000000..db1712938a96
--- /dev/null
+++ b/drivers/powercap/dtpm_subsys.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_SUBSYS_H__
+#define ___DTPM_SUBSYS_H__
+
+extern struct dtpm_subsys_ops dtpm_cpu_ops;
+extern struct dtpm_subsys_ops dtpm_devfreq_ops;
+
+struct dtpm_subsys_ops *dtpm_subsys[] = {
+#ifdef CONFIG_DTPM_CPU
+ &dtpm_cpu_ops,
+#endif
+#ifdef CONFIG_DTPM_DEVFREQ
+ &dtpm_devfreq_ops,
+#endif
+};
+
+#endif
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0f1b5a7d2a89..17ad5f0d13b2 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -607,7 +607,7 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
}
static void
-__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
{
u32 select, ctrl;
@@ -615,7 +615,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(adj_val, &bp->reg->offset_ns);
- iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+ iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns);
ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
@@ -624,6 +624,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(select >> 16, &bp->reg->select);
}
+static void
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+{
+ struct timespec64 ts;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+ if (likely(!err)) {
+ timespec64_add_ns(&ts, delta_ns);
+ __ptp_ocp_settime_locked(bp, &ts);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
@@ -631,6 +647,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
unsigned long flags;
u32 adj_ns, sign;
+ if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
+ ptp_ocp_adjtime_coarse(bp, delta_ns);
+ return 0;
+ }
+
sign = delta_ns < 0 ? BIT(31) : 0;
adj_ns = sign ? -delta_ns : delta_ns;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 1c35fed20d34..c8ce6e5eea24 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -984,6 +984,7 @@ config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY
tristate "Raspberry Pi 7-inch touchscreen panel ATTINY regulator"
depends on BACKLIGHT_CLASS_DEVICE
depends on I2C
+ depends on OF_GPIO
select REGMAP_I2C
help
This driver supports ATTINY regulator on the Raspberry Pi 7-inch
@@ -1046,6 +1047,16 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RT5190A
+ tristate "Richtek RT5190A PMIC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the voltage regulators in the Richtek RT5190A
+ PMIC. It integrates one buck controller channel, three high-efficiency
+ buck converters and one LDO, plus a mute AC OFF depop function, all
+ controlled through a general I2C interface.
+
config REGULATOR_RT6160
tristate "Richtek RT6160 BuckBoost voltage regulator"
depends on I2C
@@ -1263,6 +1274,15 @@ config REGULATOR_TPS62360
high-frequency synchronous step down dc-dc converter optimized
for battery-powered portable applications.
+config REGULATOR_TPS6286X
+ tristate "TI TPS6286x Power Regulator"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ This driver supports TPS6286x voltage regulator chips. These are
+ high-frequency synchronous step-down converters with an I2C
+ interface.
+
config REGULATOR_TPS65023
tristate "TI TPS65023 Power regulators"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2e1b087489fa..1b64ad5767be 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RT5190A) += rt5190a-regulator.o
obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
@@ -149,6 +150,7 @@ obj-$(CONFIG_REGULATOR_SY8827N) += sy8827n.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6286X) += tps6286x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65086) += tps65086-regulator.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 86aa4141efa9..d2553970a67b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -6014,9 +6014,8 @@ core_initcall(regulator_init);
static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const struct regulator_ops *ops = rdev->desc->ops;
struct regulation_constraints *c = rdev->constraints;
- int enabled, ret;
+ int ret;
if (c && c->always_on)
return 0;
@@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data)
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's always on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- /* But if reading the status failed, assume that it's off. */
- if (enabled <= 0)
+ /* If reading the status failed, assume that it's off. */
+ if (_regulator_is_enabled(rdev) <= 0)
goto unlock;
if (have_full_constraints()) {
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 6f21223a488e..eb9df485bd8a 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = {
};
static struct da9121_range da914x_40A_4phase_current = {
- .val_min = 14000000,
- .val_max = 80000000,
- .val_stp = 2000000,
+ .val_min = 26000000,
+ .val_max = 78000000,
+ .val_stp = 4000000,
.reg_min = 1,
.reg_max = 14,
};
static struct da9121_range da914x_20A_2phase_current = {
- .val_min = 7000000,
- .val_max = 40000000,
+ .val_min = 13000000,
+ .val_max = 39000000,
.val_stp = 2000000,
.reg_min = 1,
.reg_max = 14,
@@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = {
};
#define DA914X_MIN_MV 500
-#define DA914X_MAX_MV 1000
+#define DA914X_MAX_MV 1300
#define DA914X_STEP_MV 10
#define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV)
#define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
@@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = {
.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
.enable_reg = DA9121_REG_BUCK_BUCK1_0,
.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
- /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
- .ramp_delay = 20000,
- /* tBUCK_EN */
- .enable_time = 20,
};
static const struct regulator_desc da9142_reg = {
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index fbc56b043071..b8bf76c170fe 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -140,7 +141,7 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
node = of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
- return PTR_ERR(node);
+ return -ENODEV;
}
for (i = 0; i < chip->info->num_outputs; ++i)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 80b65cb87cef..cb7e50003f70 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -459,7 +459,7 @@ static int max8973_thermal_read_temp(void *data, int *temp)
return ret;
}
- /* +1 degC to trigger cool devive */
+ /* +1 degC to trigger cool device */
if (val & MAX77621_CHIPID_TJINT_S)
*temp = mchip->junction_temp_warning + 1000;
else
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index a3bc0eb6ceb8..561de6b2e6e3 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1121,6 +1121,39 @@ static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pmx65_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_hfsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_hfsmps510, "vdd-s8"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l18"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo, "vdd-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo, "vdd-l12"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_nldo, "vdd-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo, "vdd-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_nldo, "vdd-l17"),
+ /* ldo18 not configured */
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_nldo, "vdd-l19"),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic5_nldo, "vdd-l20"),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic5_nldo, "vdd-l21"),
+ {}
+};
+
static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"),
@@ -1277,6 +1310,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pmx55_vreg_data,
},
{
+ .compatible = "qcom,pmx65-rpmh-regulators",
+ .data = pmx65_vreg_data,
+ },
+ {
.compatible = "qcom,pm7325-rpmh-regulators",
.data = pm7325_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 9fc666107a06..8490aa8eecb1 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -1317,8 +1317,10 @@ static int rpm_reg_probe(struct platform_device *pdev)
for_each_available_child_of_node(dev->of_node, node) {
vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
+ if (!vreg) {
+ of_node_put(node);
return -ENOMEM;
+ }
ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index ee46bfbf5eee..f7df0f4b2f87 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -21,63 +22,146 @@
/* I2C registers of the Atmel microcontroller. */
#define REG_ID 0x80
#define REG_PORTA 0x81
-#define REG_PORTA_HF BIT(2)
-#define REG_PORTA_VF BIT(3)
#define REG_PORTB 0x82
+#define REG_PORTC 0x83
#define REG_POWERON 0x85
#define REG_PWM 0x86
+#define REG_ADDR_L 0x8c
+#define REG_ADDR_H 0x8d
+#define REG_WRITE_DATA_H 0x90
+#define REG_WRITE_DATA_L 0x91
+
+#define PA_LCD_DITHB BIT(0)
+#define PA_LCD_MODE BIT(1)
+#define PA_LCD_LR BIT(2)
+#define PA_LCD_UD BIT(3)
+
+#define PB_BRIDGE_PWRDNX_N BIT(0)
+#define PB_LCD_VCC_N BIT(1)
+#define PB_LCD_MAIN BIT(7)
+
+#define PC_LED_EN BIT(0)
+#define PC_RST_TP_N BIT(1)
+#define PC_RST_LCD_N BIT(2)
+#define PC_RST_BRIDGE_N BIT(3)
+
+enum gpio_signals {
+ RST_BRIDGE_N, /* TC358762 bridge reset */
+ RST_TP_N, /* Touch controller reset */
+ NUM_GPIO
+};
+
+struct gpio_signal_mappings {
+ unsigned int reg;
+ unsigned int mask;
+};
+
+static const struct gpio_signal_mappings mappings[NUM_GPIO] = {
+ [RST_BRIDGE_N] = { REG_PORTC, PC_RST_BRIDGE_N | PC_RST_LCD_N },
+ [RST_TP_N] = { REG_PORTC, PC_RST_TP_N },
+};
+
+struct attiny_lcd {
+ /* lock to serialise overall accesses to the Atmel */
+ struct mutex lock;
+ struct regmap *regmap;
+ bool gpio_states[NUM_GPIO];
+ u8 port_states[3];
+
+ struct gpio_chip gc;
+};
static const struct regmap_config attiny_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = REG_PWM,
- .cache_type = REGCACHE_NONE,
+ .disable_locking = 1,
+ .max_register = REG_WRITE_DATA_L,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int attiny_set_port_state(struct attiny_lcd *state, int reg, u8 val)
+{
+ state->port_states[reg - REG_PORTA] = val;
+ return regmap_write(state->regmap, reg, val);
+}
+
+static u8 attiny_get_port_state(struct attiny_lcd *state, int reg)
+{
+ return state->port_states[reg - REG_PORTA];
};
static int attiny_lcd_power_enable(struct regulator_dev *rdev)
{
- unsigned int data;
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
- regmap_write(rdev->regmap, REG_POWERON, 1);
- /* Wait for nPWRDWN to go low to indicate poweron is done. */
- regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data,
- data & BIT(0), 10, 1000000);
+ mutex_lock(&state->lock);
+
+ /* Ensure the bridge and touch controller stay in reset */
+ attiny_set_port_state(state, REG_PORTC, 0);
+ usleep_range(5000, 10000);
/* Default to the same orientation as the closed source
* firmware used for the panel. Runtime rotation
* configuration will be supported using VC4's plane
* orientation bits.
*/
- regmap_write(rdev->regmap, REG_PORTA, BIT(2));
+ attiny_set_port_state(state, REG_PORTA, PA_LCD_LR);
+ usleep_range(5000, 10000);
+ /* Main regulator on, and power to the panel (LCD_VCC_N) */
+ attiny_set_port_state(state, REG_PORTB, PB_LCD_MAIN);
+ usleep_range(5000, 10000);
+ /* Bring controllers out of reset */
+ attiny_set_port_state(state, REG_PORTC, PC_LED_EN);
+
+ msleep(80);
+
+ mutex_unlock(&state->lock);
return 0;
}
static int attiny_lcd_power_disable(struct regulator_dev *rdev)
{
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
+
+ mutex_lock(&state->lock);
+
regmap_write(rdev->regmap, REG_PWM, 0);
- regmap_write(rdev->regmap, REG_POWERON, 0);
- udelay(1);
+ usleep_range(5000, 10000);
+
+ attiny_set_port_state(state, REG_PORTA, 0);
+ usleep_range(5000, 10000);
+ attiny_set_port_state(state, REG_PORTB, PB_LCD_VCC_N);
+ usleep_range(5000, 10000);
+ attiny_set_port_state(state, REG_PORTC, 0);
+ msleep(30);
+
+ mutex_unlock(&state->lock);
+
return 0;
}
static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
{
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
unsigned int data;
- int ret;
+ int ret, i;
- ret = regmap_read(rdev->regmap, REG_POWERON, &data);
- if (ret < 0)
- return ret;
+ mutex_lock(&state->lock);
- if (!(data & BIT(0)))
- return 0;
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(rdev->regmap, REG_PORTC, &data);
+ if (!ret)
+ break;
+ usleep_range(10000, 12000);
+ }
+
+ mutex_unlock(&state->lock);
- ret = regmap_read(rdev->regmap, REG_PORTB, &data);
if (ret < 0)
return ret;
- return data & BIT(0);
+ return data & PC_RST_BRIDGE_N;
}
static const struct regulator_init_data attiny_regulator_default = {
@@ -101,33 +185,104 @@ static const struct regulator_desc attiny_regulator = {
static int attiny_update_status(struct backlight_device *bl)
{
- struct regmap *regmap = bl_get_data(bl);
+ struct attiny_lcd *state = bl_get_data(bl);
+ struct regmap *regmap = state->regmap;
int brightness = bl->props.brightness;
+ int ret, i;
+
+ mutex_lock(&state->lock);
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK)
brightness = 0;
- return regmap_write(regmap, REG_PWM, brightness);
-}
-
-static int attiny_get_brightness(struct backlight_device *bl)
-{
- struct regmap *regmap = bl_get_data(bl);
- int ret, brightness;
+ for (i = 0; i < 10; i++) {
+ ret = regmap_write(regmap, REG_PWM, brightness);
+ if (!ret)
+ break;
+ }
- ret = regmap_read(regmap, REG_PWM, &brightness);
- if (ret)
- return ret;
+ mutex_unlock(&state->lock);
- return brightness;
+ return ret;
}
static const struct backlight_ops attiny_bl = {
.update_status = attiny_update_status,
- .get_brightness = attiny_get_brightness,
};
+static int attiny_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void attiny_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+{
+ struct attiny_lcd *state = gpiochip_get_data(gc);
+ u8 last_val;
+
+ if (off >= NUM_GPIO)
+ return;
+
+ mutex_lock(&state->lock);
+
+ last_val = attiny_get_port_state(state, mappings[off].reg);
+ if (val)
+ last_val |= mappings[off].mask;
+ else
+ last_val &= ~mappings[off].mask;
+
+ attiny_set_port_state(state, mappings[off].reg, last_val);
+
+ if (off == RST_BRIDGE_N && val) {
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_ADDR_H, 0x04);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_ADDR_L, 0x7c);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_WRITE_DATA_H, 0x00);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_WRITE_DATA_L, 0x00);
+
+ msleep(100);
+ }
+
+ mutex_unlock(&state->lock);
+}
+
+static int attiny_i2c_read(struct i2c_client *client, u8 reg, unsigned int *buf)
+{
+ struct i2c_msg msgs[1];
+ u8 addr_buf[1] = { reg };
+ u8 data_buf[1] = { 0, };
+ int ret;
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ usleep_range(5000, 10000);
+
+ /* Read data from register */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].len = 1;
+ msgs[0].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *buf = data_buf[0];
+ return 0;
+}
+
/*
* I2C driver interface functions
*/
@@ -138,22 +293,30 @@ static int attiny_i2c_probe(struct i2c_client *i2c,
struct regulator_config config = { };
struct backlight_device *bl;
struct regulator_dev *rdev;
+ struct attiny_lcd *state;
struct regmap *regmap;
unsigned int data;
int ret;
+ state = devm_kzalloc(&i2c->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ i2c_set_clientdata(i2c, state);
+
regmap = devm_regmap_init_i2c(i2c, &attiny_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- return ret;
+ goto error;
}
- ret = regmap_read(regmap, REG_ID, &data);
+ ret = attiny_i2c_read(i2c, REG_ID, &data);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read REG_ID reg: %d\n", ret);
- return ret;
+ goto error;
}
switch (data) {
@@ -162,34 +325,73 @@ static int attiny_i2c_probe(struct i2c_client *i2c,
break;
default:
dev_err(&i2c->dev, "Unknown Atmel firmware revision: 0x%02x\n", data);
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
regmap_write(regmap, REG_POWERON, 0);
- mdelay(1);
+ msleep(30);
+ regmap_write(regmap, REG_PWM, 0);
config.dev = &i2c->dev;
config.regmap = regmap;
config.of_node = i2c->dev.of_node;
config.init_data = &attiny_regulator_default;
+ config.driver_data = state;
rdev = devm_regulator_register(&i2c->dev, &attiny_regulator, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register ATTINY regulator\n");
- return PTR_ERR(rdev);
+ ret = PTR_ERR(rdev);
+ goto error;
}
props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
- bl = devm_backlight_device_register(&i2c->dev,
- "7inch-touchscreen-panel-bl",
- &i2c->dev, regmap, &attiny_bl,
+
+ state->regmap = regmap;
+
+ bl = devm_backlight_device_register(&i2c->dev, dev_name(&i2c->dev),
+ &i2c->dev, state, &attiny_bl,
&props);
- if (IS_ERR(bl))
- return PTR_ERR(bl);
+ if (IS_ERR(bl)) {
+ ret = PTR_ERR(bl);
+ goto error;
+ }
bl->props.brightness = 0xff;
+ state->gc.parent = &i2c->dev;
+ state->gc.label = i2c->name;
+ state->gc.owner = THIS_MODULE;
+ state->gc.of_node = i2c->dev.of_node;
+ state->gc.base = -1;
+ state->gc.ngpio = NUM_GPIO;
+
+ state->gc.set = attiny_gpio_set;
+ state->gc.get_direction = attiny_gpio_get_direction;
+ state->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(&i2c->dev, &state->gc, state);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to create gpiochip: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ mutex_destroy(&state->lock);
+
+ return ret;
+}
+
+static int attiny_i2c_remove(struct i2c_client *client)
+{
+ struct attiny_lcd *state = i2c_get_clientdata(client);
+
+ mutex_destroy(&state->lock);
+
return 0;
}
@@ -205,6 +407,7 @@ static struct i2c_driver attiny_regulator_driver = {
.of_match_table = of_match_ptr(attiny_dt_ids),
},
.probe = attiny_i2c_probe,
+ .remove = attiny_i2c_remove,
};
module_i2c_driver(attiny_regulator_driver);
diff --git a/drivers/regulator/rt5190a-regulator.c b/drivers/regulator/rt5190a-regulator.c
new file mode 100644
index 000000000000..155d4afd00b1
--- /dev/null
+++ b/drivers/regulator/rt5190a-regulator.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <dt-bindings/regulator/richtek,rt5190a-regulator.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT5190A_REG_MANUFACTURE 0x00
+#define RT5190A_REG_BUCK2VSEL 0x04
+#define RT5190A_REG_BUCK3VSEL 0x05
+#define RT5190A_REG_DCDCCNTL 0x06
+#define RT5190A_REG_ENABLE 0x07
+#define RT5190A_REG_DISCHARGE 0x09
+#define RT5190A_REG_PROTMODE 0x0A
+#define RT5190A_REG_MUTECNTL 0x0B
+#define RT5190A_REG_PGSTAT 0x0F
+#define RT5190A_REG_OVINT 0x10
+#define RT5190A_REG_HOTDIEMASK 0x17
+
+#define RT5190A_VSEL_MASK GENMASK(6, 0)
+#define RT5190A_RID_BITMASK(rid) BIT(rid + 1)
+#define RT5190A_BUCK1_DISCHG_MASK GENMASK(1, 0)
+#define RT5190A_BUCK1_DISCHG_ONVAL 0x01
+#define RT5190A_OVERVOLT_MASK GENMASK(7, 0)
+#define RT5190A_UNDERVOLT_MASK GENMASK(15, 8)
+#define RT5190A_CH234OT_MASK BIT(29)
+#define RT5190A_CHIPOT_MASK BIT(28)
+
+#define RT5190A_BUCK23_MINUV 600000
+#define RT5190A_BUCK23_MAXUV 1400000
+#define RT5190A_BUCK23_STEPUV 10000
+#define RT5190A_BUCK23_STEPNUM ((1400000 - 600000) / 10000 + 1)
+
+enum {
+ RT5190A_IDX_BUCK1 = 0,
+ RT5190A_IDX_BUCK2,
+ RT5190A_IDX_BUCK3,
+ RT5190A_IDX_BUCK4,
+ RT5190A_IDX_LDO,
+ RT5190A_MAX_IDX
+};
+
+struct rt5190a_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_desc rdesc[RT5190A_MAX_IDX];
+ struct regulator_dev *rdev[RT5190A_MAX_IDX];
+};
+
+static int rt5190a_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int pgood_stat;
+ int ret;
+
+ ret = regmap_read(regmap, RT5190A_REG_PGSTAT, &pgood_stat);
+ if (ret)
+ return ret;
+
+ if (!(pgood_stat & RT5190A_RID_BITMASK(rid)))
+ *flags = REGULATOR_ERROR_FAIL;
+ else
+ *flags = 0;
+
+ return 0;
+}
+
+static int rt5190a_fixed_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int mask = RT5190A_RID_BITMASK(rid), val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = mask;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, RT5190A_REG_DCDCCNTL, mask, val);
+}
+
+static unsigned int rt5190a_fixed_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, RT5190A_REG_DCDCCNTL, &val);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to get mode [%d]\n", ret);
+ return ret;
+ }
+
+ if (val & RT5190A_RID_BITMASK(rid))
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops rt5190a_ranged_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static const struct regulator_ops rt5190a_fixed_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_mode = rt5190a_fixed_buck_set_mode,
+ .get_mode = rt5190a_fixed_buck_get_mode,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static const struct regulator_ops rt5190a_fixed_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static irqreturn_t rt5190a_irq_handler(int irq, void *data)
+{
+ struct rt5190a_priv *priv = data;
+ __le32 raws;
+ unsigned int events, fields;
+ static const struct {
+ unsigned int bitmask;
+ unsigned int report;
+ } event_tbl[] = {
+ { RT5190A_OVERVOLT_MASK, REGULATOR_ERROR_REGULATION_OUT },
+ { RT5190A_UNDERVOLT_MASK, REGULATOR_ERROR_UNDER_VOLTAGE }
+ };
+ int i, j, ret;
+
+ ret = regmap_raw_read(priv->regmap, RT5190A_REG_OVINT, &raws,
+ sizeof(raws));
+ if (ret) {
+ dev_err(priv->dev, "Failed to read events\n");
+ return IRQ_NONE;
+ }
+
+ events = le32_to_cpu(raws);
+
+ ret = regmap_raw_write(priv->regmap, RT5190A_REG_OVINT, &raws,
+ sizeof(raws));
+ if (ret)
+ dev_err(priv->dev, "Failed to write-clear events\n");
+
+ /* Handle OV,UV events */
+ for (i = 0; i < ARRAY_SIZE(event_tbl); i++) {
+ fields = events & event_tbl[i].bitmask;
+ fields >>= ffs(event_tbl[i].bitmask) - 1;
+
+ for (j = 0; j < RT5190A_MAX_IDX; j++) {
+ if (!(fields & RT5190A_RID_BITMASK(j)))
+ continue;
+
+ regulator_notifier_call_chain(priv->rdev[j],
+ event_tbl[i].report,
+ NULL);
+ }
+ }
+
+ /* Handle CH234 OT event */
+ if (events & RT5190A_CH234OT_MASK) {
+ for (j = RT5190A_IDX_BUCK2; j < RT5190A_IDX_LDO; j++) {
+ regulator_notifier_call_chain(priv->rdev[j],
+ REGULATOR_ERROR_OVER_TEMP,
+ NULL);
+ }
+ }
+
+ /* Warn if chip overtemperature occurs */
+ if (events & RT5190A_CHIPOT_MASK)
+ dev_warn(priv->dev, "CHIP overheat\n");
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int rt5190a_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RT5190A_OPMODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case RT5190A_OPMODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int rt5190a_of_parse_cb(struct rt5190a_priv *priv, int rid,
+ struct of_regulator_match *match)
+{
+ struct regulator_desc *desc = priv->rdesc + rid;
+ struct regulator_init_data *init_data = match->init_data;
+ struct device_node *np = match->of_node;
+ bool latchup_enable;
+ unsigned int mask = RT5190A_RID_BITMASK(rid), val;
+
+ switch (rid) {
+ case RT5190A_IDX_BUCK1:
+ case RT5190A_IDX_BUCK4:
+ case RT5190A_IDX_LDO:
+ init_data->constraints.apply_uV = 0;
+
+ if (init_data->constraints.min_uV ==
+ init_data->constraints.max_uV)
+ desc->fixed_uV = init_data->constraints.min_uV;
+ else {
+ dev_err(priv->dev,
+ "Variable voltage for fixed regulator\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ latchup_enable = of_property_read_bool(np, "richtek,latchup-enable");
+
+ /* latchup: 0, default hiccup: 1 */
+ val = !latchup_enable ? mask : 0;
+
+ return regmap_update_bits(priv->regmap, RT5190A_REG_PROTMODE, mask, val);
+}
+
+static void rt5190a_fillin_regulator_desc(struct regulator_desc *desc, int rid)
+{
+ static const char * const regu_name[] = { "buck1", "buck2",
+ "buck3", "buck4",
+ "ldo" };
+ static const char * const supply[] = { NULL, "vin2", "vin3", "vin4",
+ "vinldo" };
+
+ desc->name = regu_name[rid];
+ desc->supply_name = supply[rid];
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->id = rid;
+ desc->enable_reg = RT5190A_REG_ENABLE;
+ desc->enable_mask = RT5190A_RID_BITMASK(rid);
+ desc->active_discharge_reg = RT5190A_REG_DISCHARGE;
+ desc->active_discharge_mask = RT5190A_RID_BITMASK(rid);
+ desc->active_discharge_on = RT5190A_RID_BITMASK(rid);
+
+ switch (rid) {
+ case RT5190A_IDX_BUCK1:
+ desc->active_discharge_mask = RT5190A_BUCK1_DISCHG_MASK;
+ desc->active_discharge_on = RT5190A_BUCK1_DISCHG_ONVAL;
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_buck_ops;
+ desc->of_map_mode = rt5190a_of_map_mode;
+ break;
+ case RT5190A_IDX_BUCK2:
+ desc->vsel_reg = RT5190A_REG_BUCK2VSEL;
+ desc->vsel_mask = RT5190A_VSEL_MASK;
+ desc->min_uV = RT5190A_BUCK23_MINUV;
+ desc->uV_step = RT5190A_BUCK23_STEPUV;
+ desc->n_voltages = RT5190A_BUCK23_STEPNUM;
+ desc->ops = &rt5190a_ranged_buck_ops;
+ break;
+ case RT5190A_IDX_BUCK3:
+ desc->vsel_reg = RT5190A_REG_BUCK3VSEL;
+ desc->vsel_mask = RT5190A_VSEL_MASK;
+ desc->min_uV = RT5190A_BUCK23_MINUV;
+ desc->uV_step = RT5190A_BUCK23_STEPUV;
+ desc->n_voltages = RT5190A_BUCK23_STEPNUM;
+ desc->ops = &rt5190a_ranged_buck_ops;
+ break;
+ case RT5190A_IDX_BUCK4:
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_buck_ops;
+ desc->of_map_mode = rt5190a_of_map_mode;
+ break;
+ case RT5190A_IDX_LDO:
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_ldo_ops;
+ break;
+ }
+}
+
+static struct of_regulator_match rt5190a_regulator_match[] = {
+ { .name = "buck1", },
+ { .name = "buck2", },
+ { .name = "buck3", },
+ { .name = "buck4", },
+ { .name = "ldo", }
+};
+
+static int rt5190a_parse_regulator_dt_data(struct rt5190a_priv *priv)
+{
+ struct device_node *regulator_np;
+ struct regulator_desc *reg_desc;
+ struct of_regulator_match *match;
+ int i, ret;
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ reg_desc = priv->rdesc + i;
+ match = rt5190a_regulator_match + i;
+
+ rt5190a_fillin_regulator_desc(reg_desc, i);
+
+ match->desc = reg_desc;
+ }
+
+ regulator_np = of_get_child_by_name(priv->dev->of_node, "regulators");
+ if (!regulator_np) {
+ dev_err(priv->dev, "Could not find 'regulators' node\n");
+ return -ENODEV;
+ }
+
+ ret = of_regulator_match(priv->dev, regulator_np,
+ rt5190a_regulator_match,
+ ARRAY_SIZE(rt5190a_regulator_match));
+
+ of_node_put(regulator_np);
+
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Error parsing regulator init data: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ match = rt5190a_regulator_match + i;
+
+ ret = rt5190a_of_parse_cb(priv, i, match);
+ if (ret) {
+ dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct reg_sequence rt5190a_init_patch[] = {
+ { 0x09, 0x3d, },
+ { 0x0a, 0x3e, },
+ { 0x0b, 0x01, },
+ { 0x10, 0xff, },
+ { 0x11, 0xff, },
+ { 0x12, 0xff, },
+ { 0x13, 0xff, },
+ { 0x14, 0, },
+ { 0x15, 0, },
+ { 0x16, 0x3e, },
+ { 0x17, 0, }
+};
+
+static int rt5190a_device_initialize(struct rt5190a_priv *priv)
+{
+ bool mute_enable;
+ int ret;
+
+ ret = regmap_register_patch(priv->regmap, rt5190a_init_patch,
+ ARRAY_SIZE(rt5190a_init_patch));
+ if (ret) {
+ dev_err(priv->dev, "Failed to do register patch\n");
+ return ret;
+ }
+
+ mute_enable = device_property_read_bool(priv->dev,
+ "richtek,mute-enable");
+
+ if (mute_enable) {
+ ret = regmap_write(priv->regmap, RT5190A_REG_MUTECNTL, 0x00);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable mute function\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rt5190a_device_check(struct rt5190a_priv *priv)
+{
+ u16 devid;
+ int ret;
+
+ ret = regmap_raw_read(priv->regmap, RT5190A_REG_MANUFACTURE, &devid,
+ sizeof(devid));
+ if (ret)
+ return ret;
+
+ if (devid) {
+ dev_err(priv->dev, "Incorrect device id 0x%04x\n", devid);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config rt5190a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5190A_REG_HOTDIEMASK,
+};
+
+static int rt5190a_probe(struct i2c_client *i2c)
+{
+ struct rt5190a_priv *priv;
+ struct regulator_config cfg = {};
+ int i, ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &i2c->dev;
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &rt5190a_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&i2c->dev, "Failed to allocate regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = rt5190a_device_check(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to check device %d\n", ret);
+ return ret;
+ }
+
+ ret = rt5190a_device_initialize(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to initialize the device\n");
+ return ret;
+ }
+
+ ret = rt5190a_parse_regulator_dt_data(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to parse regulator dt\n");
+ return ret;
+ }
+
+ cfg.dev = &i2c->dev;
+ cfg.regmap = priv->regmap;
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ struct regulator_desc *desc = priv->rdesc + i;
+ struct of_regulator_match *match = rt5190a_regulator_match + i;
+
+ cfg.init_data = match->init_data;
+ cfg.of_node = match->of_node;
+
+ priv->rdev[i] = devm_regulator_register(&i2c->dev, desc, &cfg);
+ if (IS_ERR(priv->rdev[i])) {
+ dev_err(&i2c->dev, "Failed to register regulator %s\n",
+ desc->name);
+ return PTR_ERR(priv->rdev[i]);
+ }
+ }
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5190a_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&i2c->dev), priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to register interrupt\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt5190a_device_table[] = {
+ { .compatible = "richtek,rt5190a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt5190a_device_table);
+
+static struct i2c_driver rt5190a_driver = {
+ .driver = {
+ .name = "rt5190a",
+ .of_match_table = rt5190a_device_table,
+ },
+ .probe_new = rt5190a_probe,
+};
+module_i2c_driver(rt5190a_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5190A Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
index 0f21f95c8981..71e5ceb679f4 100644
--- a/drivers/regulator/sc2731-regulator.c
+++ b/drivers/regulator/sc2731-regulator.c
@@ -1,4 +1,4 @@
- //SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*/
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 2931a0b89bff..bd7b2f287250 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -42,7 +42,7 @@
/**
* struct ti_abb_info - ABB information per voltage setting
* @opp_sel: one of TI_ABB macro
- * @vset: (optional) vset value that LDOVBB needs to be overriden with.
+ * @vset: (optional) vset value that LDOVBB needs to be overridden with.
*
* Array of per voltage entries organized in the same order as regulator_desc's
* volt_table list. (selector is used to index from this array)
@@ -484,7 +484,7 @@ static int ti_abb_init_timings(struct device *dev, struct ti_abb *abb)
/* Calculate cycle rate */
cycle_rate = DIV_ROUND_CLOSEST(clock_cycles * 10, clk_rate);
- /* Calulate SR2_WTCNT_VALUE */
+ /* Calculate SR2_WTCNT_VALUE */
sr2_wt_cnt_val = DIV_ROUND_CLOSEST(abb->settling_time * 10, cycle_rate);
dev_dbg(dev, "%s: Clk_rate=%ld, sr2_cnt=0x%08x\n", __func__,
@@ -688,7 +688,7 @@ MODULE_DEVICE_TABLE(of, ti_abb_of_match);
* @pdev: ABB platform device
*
* Initializes an individual ABB LDO for required Body-Bias. ABB is used to
- * addional bias supply to SoC modules for power savings or mandatory stability
+ * additional bias supply to SoC modules for power savings or mandatory stability
* configuration at certain Operating Performance Points(OPPs).
*
* Return: 0 on success or appropriate error value when fails
diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c
new file mode 100644
index 000000000000..e29deda30d75
--- /dev/null
+++ b/drivers/regulator/tps6286x-regulator.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright Axis Communications AB
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+
+#include <dt-bindings/regulator/ti,tps62864.h>
+
+#define TPS6286X_VOUT1 0x01
+#define TPS6286X_VOUT1_VO1_SET GENMASK(7, 0)
+
+#define TPS6286X_CONTROL 0x03
+#define TPS6286X_CONTROL_FPWM BIT(4)
+#define TPS6286X_CONTROL_SWEN BIT(5)
+
+#define TPS6286X_MIN_MV 400
+#define TPS6286X_MAX_MV 1675
+#define TPS6286X_STEP_MV 5
+
+static const struct regmap_config tps6286x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int tps6286x_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = TPS6286X_CONTROL_FPWM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, TPS6286X_CONTROL,
+ TPS6286X_CONTROL_FPWM, val);
+}
+
+static unsigned int tps6286x_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, TPS6286X_CONTROL, &val);
+ if (ret < 0)
+ return 0;
+
+ return (val & TPS6286X_CONTROL_FPWM) ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops tps6286x_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps6286x_set_mode,
+ .get_mode = tps6286x_get_mode,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static unsigned int tps6286x_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case TPS62864_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case TPS62864_MODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static const struct regulator_desc tps6286x_reg = {
+ .name = "tps6286x",
+ .of_match = of_match_ptr("SW"),
+ .owner = THIS_MODULE,
+ .ops = &tps6286x_regulator_ops,
+ .of_map_mode = tps6286x_of_map_mode,
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ((TPS6286X_MAX_MV - TPS6286X_MIN_MV) / TPS6286X_STEP_MV) + 1,
+ .min_uV = TPS6286X_MIN_MV * 1000,
+ .uV_step = TPS6286X_STEP_MV * 1000,
+ .vsel_reg = TPS6286X_VOUT1,
+ .vsel_mask = TPS6286X_VOUT1_VO1_SET,
+ .enable_reg = TPS6286X_CONTROL,
+ .enable_mask = TPS6286X_CONTROL_SWEN,
+ .ramp_delay = 1000,
+ /* tDelay + tRamp, rounded up */
+ .enable_time = 3000,
+};
+
+static const struct of_device_id tps6286x_dt_ids[] = {
+ { .compatible = "ti,tps62864", },
+ { .compatible = "ti,tps62866", },
+ { .compatible = "ti,tps62868", },
+ { .compatible = "ti,tps62869", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps6286x_dt_ids);
+
+static int tps6286x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &tps6286x_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ config.dev = &i2c->dev;
+ config.of_node = dev->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&i2c->dev, &tps6286x_reg, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register tps6286x regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id tps6286x_i2c_id[] = {
+ { "tps62864", 0 },
+ { "tps62866", 0 },
+ { "tps62868", 0 },
+ { "tps62869", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, tps6286x_i2c_id);
+
+static struct i2c_driver tps6286x_regulator_driver = {
+ .driver = {
+ .name = "tps6286x",
+ .of_match_table = of_match_ptr(tps6286x_dt_ids),
+ },
+ .probe = tps6286x_i2c_probe,
+ .id_table = tps6286x_i2c_id,
+};
+
+module_i2c_driver(tps6286x_regulator_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index d2a37978fc3a..aac7be3b33f7 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -185,10 +185,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
unsigned int next_sel;
int delay;
- if (selector >= vctrl->vtable[vctrl->sel].ovp_min_sel)
- next_sel = selector;
- else
- next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
+ next_sel = max_t(unsigned int, selector, vctrl->vtable[vctrl->sel].ovp_min_sel);
ret = regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl->vtable[next_sel].ctrl,
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 52c5a0e0acd8..5d32628a5011 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -13,6 +13,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
struct virtual_consumer_data {
struct mutex lock;
@@ -281,26 +282,53 @@ static const struct attribute_group regulator_virtual_attr_group = {
.attrs = regulator_virtual_attributes,
};
+#ifdef CONFIG_OF
+static const struct of_device_id regulator_virtual_consumer_of_match[] = {
+ { .compatible = "regulator-virtual-consumer" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, regulator_virtual_consumer_of_match);
+#endif
+
static int regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = dev_get_platdata(&pdev->dev);
struct virtual_consumer_data *drvdata;
+ static bool warned;
int ret;
+ if (!warned) {
+ warned = true;
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** regulator-virtual-consumer is only for testing and **\n");
+ pr_warn("** debugging. Do not use it in a production kernel. **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+ }
+
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct virtual_consumer_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
+ /*
+ * This virtual consumer does not have any hardware-defined supply
+ * name, so just allow the regulator to be specified in a property
+ * named "default-supply" when we're being probed from devicetree.
+ */
+ if (!reg_id && pdev->dev.of_node)
+ reg_id = "default";
+
mutex_init(&drvdata->lock);
drvdata->regulator = devm_regulator_get(&pdev->dev, reg_id);
- if (IS_ERR(drvdata->regulator)) {
- ret = PTR_ERR(drvdata->regulator);
- dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
- reg_id, ret);
- return ret;
- }
+ if (IS_ERR(drvdata->regulator))
+ return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->regulator),
+ "Failed to obtain supply '%s'\n",
+ reg_id);
ret = sysfs_create_group(&pdev->dev.kobj,
&regulator_virtual_attr_group);
@@ -334,6 +362,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
.remove = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
+ .of_match_table = of_match_ptr(regulator_virtual_consumer_of_match),
},
};
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 6579bfdb0c26..b1d5aac8917d 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1112,7 +1112,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
if (pdev->id < WM8350_DCDC_1 || pdev->id > WM8350_ISINK_B)
return -ENODEV;
- /* do any regulatior specific init */
+ /* do any regulator specific init */
switch (pdev->id) {
case WM8350_DCDC_1:
val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 3ddd426fc969..166019786653 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -180,6 +180,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -199,6 +200,7 @@ config QCOM_Q6V5_MSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -218,6 +220,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -239,6 +242,7 @@ config QCOM_Q6V5_WCSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index eada7e34f3af..442a388f8102 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index d6214cb66026..5663cf799c95 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -93,7 +93,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
/* wake up any blocked readers */
wake_up_interruptible(&eptdev->readq);
- device_del(&eptdev->dev);
+ cdev_device_del(&eptdev->cdev, &eptdev->dev);
put_device(&eptdev->dev);
return 0;
@@ -336,7 +336,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ept_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
- cdev_del(&eptdev->cdev);
kfree(eptdev);
}
@@ -381,19 +380,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
dev->id = ret;
dev_set_name(dev, "rpmsg%d", ret);
- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
if (ret)
goto free_ept_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_eptdev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
return ret;
free_ept_ida:
@@ -462,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
- cdev_del(&ctrldev->cdev);
kfree(ctrldev);
}
@@ -497,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
dev->id = ret;
dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
if (ret)
goto free_ctrl_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_ctrldev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
dev_set_drvdata(&rpdev->dev, ctrldev);
return ret;
@@ -535,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
if (ret)
dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
- device_del(&ctrldev->dev);
+ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
put_device(&ctrldev->dev);
}
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 2f83adef966e..6d66ab5a8b17 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -185,10 +185,9 @@ static int ds1302_probe(struct spi_device *spi)
return 0;
}
-static int ds1302_remove(struct spi_device *spi)
+static void ds1302_remove(struct spi_device *spi)
{
spi_set_drvdata(spi, NULL);
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 9ef107b99b65..ed9360486953 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -720,7 +720,7 @@ static int ds1305_probe(struct spi_device *spi)
return 0;
}
-static int ds1305_remove(struct spi_device *spi)
+static void ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
@@ -730,8 +730,6 @@ static int ds1305_remove(struct spi_device *spi)
devm_free_irq(&spi->dev, spi->irq, ds1305);
cancel_work_sync(&ds1305->work);
}
-
- return 0;
}
static struct spi_driver ds1305_driver = {
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index f14ed6c96437..ed5a6ba89a3e 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -434,11 +434,9 @@ static int ds1343_probe(struct spi_device *spi)
return 0;
}
-static int ds1343_remove(struct spi_device *spi)
+static void ds1343_remove(struct spi_device *spi)
{
dev_pm_clear_wake_irq(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8b458010f88a..3b7af00a7825 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 61ecdcb2cc6a..2a9c0ddcade5 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index a05a4297cfae..af82b3214774 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -6,7 +6,6 @@
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/genhd.h>
#include <linux/list.h>
#include <asm/debug.h>
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index cd938a26b76c..3b1cd0c96a74 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch,
else
path_event[chpid] = PE_NONE;
}
- if (cdev)
+ if (cdev && cdev->drv && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
break;
}
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d24cafe02708..511bf8e0a436 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
goto out;
}
+ /* re-init to undo drop from zfcp_fc_adisc() */
+ port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
+ u32 d_id;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+ d_id = port->d_id; /* remember as destination for send els below */
+ /*
+ * Force fresh GID_PN lookup on next port recovery.
+ * Must happen after request setup and before sending request,
+ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+ */
+ port->d_id = 0;
+
+ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b9482da79512..3ebe66151dcb 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3ad3ebaca8e9..ad4972c0fc53 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
struct scsi_cmnd *SCp = hostdata->cmd;
handled = 1;
- SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
udelay(10);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 440ef32be048..e5aa982ffedc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
if (rc) {
rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1560,9 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
- DMA_BIT_MASK(32));
- if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 71fa62bd3083..a826456c6075 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
@@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ put_cpu();
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
@@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ crc_err = (stats->InvalidCRCCount++);
+ put_cpu();
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
@@ -907,9 +912,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
-
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
return;
default:
@@ -1215,8 +1217,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1525,7 +1527,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1654,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
@@ -1694,15 +1695,12 @@ netdev_err:
return rc;
}
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
{
- struct fcoe_port *port;
struct fc_lport *lport;
- port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
-
- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -2556,9 +2554,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
-
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
index 7bb4f9aad2c8..84bc81d7ce76 100644
--- a/drivers/scsi/elx/libefc/efc_els.c
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
efc = node->efc;
- spin_lock_irqsave(&node->els_ios_lock, flags);
-
if (!node->els_io_enabled) {
efc_log_err(efc, "els io alloc disabled\n");
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
if (!els) {
atomic_add_return(1, &efc->els_io_alloc_failed_count);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
@@ -74,7 +70,6 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
&els->io.req.phys, GFP_KERNEL);
if (!els->io.req.virt) {
mempool_free(els, efc->els_io_pool);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* add els structure to ELS IO list */
INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
}
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return els;
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 88c549f257db..40a52feb315d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -986,8 +986,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
- spin_unlock_irqrestore(io_lock, flags);
-
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
@@ -996,8 +994,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
-
cmd_trace = ((u64)hdr_status << 56) |
(u64)icmnd_cmpl->scsi_status << 48 |
(u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
@@ -1021,6 +1017,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ /* Call SCSI completion function to complete the IO */
+ scsi_done(sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1049,9 +1051,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
-
- /* Call SCSI completion function to complete the IO */
- scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a05ec7aece5a..ebf5ec38891b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -400,8 +400,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_dq *dq,
struct hisi_sas_device *sas_dev,
- struct hisi_sas_internal_abort *abort,
- struct hisi_sas_tmf_task *tmf)
+ struct hisi_sas_internal_abort *abort)
{
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue;
@@ -427,8 +426,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->tmf = tmf;
- slot->is_internal = tmf;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -587,7 +584,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
slot->is_internal = tmf;
/* protect task_prep and start_delivery sequence */
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
return 0;
@@ -1380,12 +1377,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
+ struct hisi_sas_tmf_task tmf = {};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
@@ -1396,7 +1394,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
@@ -2067,7 +2065,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot->port = port;
slot->is_internal = true;
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
return 0;
@@ -2666,9 +2664,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (error)
- error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a45ef9a5e12e..a01a3a7b706b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -4695,8 +4695,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4878c94761f9..98cabe09c040 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -592,6 +592,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -1161,6 +1162,16 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
@@ -1182,9 +1193,6 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7a7f17d71811..fa8415259cb8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -3978,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index db5ccae1b63d..f936833c9909 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
/* The driver has a VALID PLOGI but the rport has
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a56f01f659f8..558f7d2559c4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7d717a4ac14d..fdf5e777bf11 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1bc0db572d9e..430abebf99f1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 511726f92d9a..76229b839560 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2011,9 +2011,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
enable_irq(reply_q->os_irq);
}
}
+
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
- if (poll)
- _base_process_reply_queue(reply_q);
}
/**
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 253ceca54a84..7eb8c39da366 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2267,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
myrs_unmap(cs);
if (cs->mmio_base) {
- cs->disable_intr(cs);
+ if (cs->disable_intr)
+ cs->disable_intr(cs);
iounmap(cs->mmio_base);
cs->mmio_base = NULL;
}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index c814e5071712..9ec310b795c3 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2692,7 +2692,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2724,8 +2723,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2767,7 +2764,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2853,20 +2849,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/*See the comments for mpi_ssp_completion */
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 160ee8b228c9..32edda3e55c6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -769,8 +769,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
+
+ if (ccb)
+ ccb->task = NULL;
goto ex_err;
}
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index bbf538fe15b3..9d20f8009b89 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2185,9 +2185,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
@@ -2794,9 +2794,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
@@ -2821,7 +2821,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2854,8 +2853,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2904,11 +2901,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
return;
}
break;
@@ -3008,24 +3000,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
- }
}
/*See the comments for mpi_ssp_completion */
@@ -3931,6 +3905,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
@@ -4151,10 +4126,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u32 ret = MPI_IO_STATUS_FAIL;
u32 regval;
+ /*
+ * Fatal errors are programmed to be signalled in irq vector
+ * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl.
+ * fatal_err_interrupt
+ */
if (vec == (pm8001_ha->max_q_num - 1)) {
+ u32 mipsall_ready;
+
+ if (pm8001_ha->chip_id == chip_8008 ||
+ pm8001_ha->chip_id == chip_8009)
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+ else
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
- SCRATCH_PAD_MIPSALL_READY) {
+ if ((regval & mipsall_ready) != mipsall_ready) {
pm8001_ha->controller_fatal_error = true;
pm8001_dbg(pm8001_ha, FAIL,
"Firmware Fatal error! Regval:0x%x\n",
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c7e5d93bea92..c41ed039c92a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1405,8 +1405,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
-#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \
SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
+ SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
SCRATCH_PAD_RAAE_READY)
/* boot loader state */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 99a56ca1fb16..fab43dabe5b3 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2250,6 +2250,7 @@ process_els:
io_req->tm_flags == FCP_TMF_TGT_RESET) {
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd);
complete(&io_req->tm_done);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index cdc66e2a9488..6ad28bc8e948 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qed_link_output if_link;
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
@@ -1864,6 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
@@ -3980,7 +3981,9 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
qedf_ctx_soft_reset(qedf->lport);
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 5916ed7662d5..4eb89aa4a39d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->list_tmf_work = NULL;
}
}
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (!found) {
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ if (!found)
goto check_cleanup_reqs;
- }
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->state = CLEANUP_RECV;
unlock:
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
wake_up_interruptible(&qedi_conn->wait_queue);
return;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2104973a35cd..911cc72dd7ac 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0a70aa763a96..e30bc51578e9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1276,7 +1276,7 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
* power management commands.
*/
if (req && !(req->rq_flags & RQF_PM))
- return BLK_STS_IOERR;
+ return BLK_STS_OFFLINE;
return BLK_STS_OK;
}
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3520b9384428..f4e6c68ac99e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ /*
+ * realloc if new shift is calculated, which is caused by setting
+ * up one new default queue depth after calling ->slave_configure
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * Request queue has to be frozen for reallocating budget map,
+ * and here disk isn't added yet, so freezing is pretty fast
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
@@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (sbitmap_init_node(&sdev->budget_map,
- scsi_device_max_queue_depth(sdev),
- sbitmap_calculate_shift(depth),
- GFP_KERNEL, sdev->request_queue->node,
- false, true)) {
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
@@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up budget map again since memory consumption of
+ * the map depends on actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 0ffdb8f2995f..acdc0aceca5e 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 62eb9921cc94..73e6f5f0f37c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -122,11 +121,6 @@ static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sd_ref_mutex);
-
static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
@@ -664,33 +658,6 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp = NULL;
-
- mutex_lock(&sd_ref_mutex);
-
- if (disk->private_data) {
- sdkp = scsi_disk(disk);
- if (scsi_device_get(sdkp->device) == 0)
- get_device(&sdkp->dev);
- else
- sdkp = NULL;
- }
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static void scsi_disk_put(struct scsi_disk *sdkp)
-{
- struct scsi_device *sdev = sdkp->device;
-
- mutex_lock(&sd_ref_mutex);
- put_device(&sdkp->dev);
- scsi_device_put(sdev);
- mutex_unlock(&sd_ref_mutex);
-}
-
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
@@ -1419,17 +1386,15 @@ static bool sd_need_revalidate(struct block_device *bdev,
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
- struct scsi_device *sdev;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
int retval;
- if (!sdkp)
+ if (scsi_device_get(sdev))
return -ENXIO;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
- sdev = sdkp->device;
-
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
@@ -1474,7 +1439,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
return 0;
error_out:
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
return retval;
}
@@ -1503,7 +1468,7 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1617,7 +1582,7 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_disk *sdkp = disk->private_data;
struct scsi_device *sdp;
int retval;
bool disk_changed;
@@ -1680,7 +1645,6 @@ out:
*/
disk_changed = sdp->changed;
sdp->changed = 0;
- scsi_disk_put(sdkp);
return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
@@ -1888,6 +1852,13 @@ static const struct pr_ops sd_pr_ops = {
.pr_clear = sd_pr_clear,
};
+static void scsi_disk_free_disk(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ put_device(&sdkp->disk_dev);
+}
+
static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
@@ -1899,6 +1870,7 @@ static const struct block_device_operations sd_fops = {
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
.get_unique_id = sd_get_unique_id,
+ .free_disk = scsi_disk_free_disk,
.pr_ops = &sd_pr_ops,
};
@@ -3516,7 +3488,6 @@ static int sd_probe(struct device *dev)
}
sdkp->device = sdp;
- sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
sdkp->max_retries = SD_MAX_RETRIES;
@@ -3531,14 +3502,14 @@ static int sd_probe(struct device *dev)
SD_MOD_TIMEOUT);
}
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = get_device(dev);
- sdkp->dev.class = &sd_disk_class;
- dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+ device_initialize(&sdkp->disk_dev);
+ sdkp->disk_dev.parent = get_device(dev);
+ sdkp->disk_dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
- error = device_add(&sdkp->dev);
+ error = device_add(&sdkp->disk_dev);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
goto out;
}
@@ -3549,7 +3520,7 @@ static int sd_probe(struct device *dev)
gd->minors = SD_MINORS;
gd->fops = &sd_fops;
- gd->private_data = &sdkp->driver;
+ gd->private_data = sdkp;
/* defaults, until the device tells us otherwise */
sdp->sector_size = 512;
@@ -3579,7 +3550,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
goto out;
}
@@ -3625,58 +3596,26 @@ static int sd_probe(struct device *dev)
**/
static int sd_remove(struct device *dev)
{
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- device_del(&sdkp->dev);
+ device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- free_opal_dev(sdkp->opal_dev);
-
- mutex_lock(&sd_ref_mutex);
- dev_set_drvdata(dev, NULL);
- put_device(&sdkp->dev);
- mutex_unlock(&sd_ref_mutex);
-
+ put_disk(sdkp->disk);
return 0;
}
-/**
- * scsi_disk_release - Called to free the scsi_disk structure
- * @dev: pointer to embedded class device
- *
- * sd_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_disk_get()
- * scsi_disk_put() helpers which manipulate the semaphore directly
- * and never do a direct put_device.
- **/
static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
ida_free(&sd_index_ida, sdkp->index);
-
- /*
- * Wait until all requests that are in progress have completed.
- * This is necessary to avoid that e.g. scsi_end_request() crashes
- * due to clearing the disk->private_data pointer. Wait from inside
- * scsi_disk_release() instead of from sd_release() to avoid that
- * freezing and unfreezing the request queue affects user space I/O
- * in case multiple processes open a /dev/sd... node concurrently.
- */
- blk_mq_freeze_queue(q);
- blk_mq_unfreeze_queue(q);
-
- disk->private_data = NULL;
- put_disk(disk);
- put_device(&sdkp->device->sdev_gendev);
-
sd_zbc_release_disk(sdkp);
+ put_device(&sdkp->device->sdev_gendev);
+ free_opal_dev(sdkp->opal_dev);
kfree(sdkp);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2e5932bde43d..0a33a4b68ffb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -68,9 +68,13 @@ enum {
};
struct scsi_disk {
- struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
- struct device dev;
+
+ /*
+ * disk_dev is used to show attributes in /sys/class/scsi_disk/,
+ * but otherwise not really needed. Do not use for refcounting.
+ */
+ struct device disk_dev;
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -127,11 +131,11 @@ struct scsi_disk {
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
-#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_disk, driver);
+ return disk->private_data;
}
#define sd_printk(prefix, sdsk, fmt, a...) \
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f925b1f1f9ad..641552d6330b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -109,11 +109,6 @@ static DEFINE_SPINLOCK(sr_index_lock);
static struct lock_class_key sr_bio_compl_lkclass;
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sr_ref_mutex);
-
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
@@ -143,11 +138,9 @@ static const struct cdrom_device_ops sr_dops = {
.capability = SR_CAPABILITIES,
};
-static void sr_kref_release(struct kref *kref);
-
static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_cd, driver);
+ return disk->private_data;
}
static int sr_runtime_suspend(struct device *dev)
@@ -163,38 +156,6 @@ static int sr_runtime_suspend(struct device *dev)
return 0;
}
-/*
- * The get and put routines for the struct scsi_cd. Note this entity
- * has a scsi_device pointer and owns a reference to this.
- */
-static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
-{
- struct scsi_cd *cd = NULL;
-
- mutex_lock(&sr_ref_mutex);
- if (disk->private_data == NULL)
- goto out;
- cd = scsi_cd(disk);
- kref_get(&cd->kref);
- if (scsi_device_get(cd->device)) {
- kref_put(&cd->kref, sr_kref_release);
- cd = NULL;
- }
- out:
- mutex_unlock(&sr_ref_mutex);
- return cd;
-}
-
-static void scsi_cd_put(struct scsi_cd *cd)
-{
- struct scsi_device *sdev = cd->device;
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- scsi_device_put(sdev);
- mutex_unlock(&sr_ref_mutex);
-}
-
static unsigned int sr_get_events(struct scsi_device *sdev)
{
u8 buf[8];
@@ -522,15 +483,13 @@ static void sr_revalidate_disk(struct scsi_cd *cd)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_cd *cd;
- struct scsi_device *sdev;
- int ret = -ENXIO;
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ int ret;
- cd = scsi_cd_get(bdev->bd_disk);
- if (!cd)
- goto out;
+ if (scsi_device_get(cd->device))
+ return -ENXIO;
- sdev = cd->device;
scsi_autopm_get_device(sdev);
if (bdev_check_media_change(bdev))
sr_revalidate_disk(cd);
@@ -541,9 +500,7 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
scsi_autopm_put_device(sdev);
if (ret)
- scsi_cd_put(cd);
-
-out:
+ scsi_device_put(cd->device);
return ret;
}
@@ -555,7 +512,7 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
cdrom_release(&cd->cdi, mode);
mutex_unlock(&cd->lock);
- scsi_cd_put(cd);
+ scsi_device_put(cd->device);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -595,18 +552,24 @@ out:
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- unsigned int ret = 0;
- struct scsi_cd *cd;
+ struct scsi_cd *cd = disk->private_data;
- cd = scsi_cd_get(disk);
- if (!cd)
+ if (atomic_read(&cd->device->disk_events_disable_depth))
return 0;
+ return cdrom_check_events(&cd->cdi, clearing);
+}
- if (!atomic_read(&cd->device->disk_events_disable_depth))
- ret = cdrom_check_events(&cd->cdi, clearing);
+static void sr_free_disk(struct gendisk *disk)
+{
+ struct scsi_cd *cd = disk->private_data;
- scsi_cd_put(cd);
- return ret;
+ spin_lock(&sr_index_lock);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ spin_unlock(&sr_index_lock);
+
+ unregister_cdrom(&cd->cdi);
+ mutex_destroy(&cd->lock);
+ kfree(cd);
}
static const struct block_device_operations sr_bdops =
@@ -617,6 +580,7 @@ static const struct block_device_operations sr_bdops =
.ioctl = sr_block_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
+ .free_disk = sr_free_disk,
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -660,8 +624,6 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- kref_init(&cd->kref);
-
disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
&sr_bio_compl_lkclass);
if (!disk)
@@ -692,7 +654,6 @@ static int sr_probe(struct device *dev)
cd->device = sdev;
cd->disk = disk;
- cd->driver = &sr_template;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
cd->media_present = 1;
@@ -713,7 +674,7 @@ static int sr_probe(struct device *dev)
sr_vendor_init(cd);
set_capacity(disk, cd->capacity);
- disk->private_data = &cd->driver;
+ disk->private_data = cd;
if (register_cdrom(disk, &cd->cdi))
goto fail_minor;
@@ -728,10 +689,8 @@ static int sr_probe(struct device *dev)
sr_revalidate_disk(cd);
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
- if (error) {
- kref_put(&cd->kref, sr_kref_release);
- goto fail;
- }
+ if (error)
+ goto unregister_cdrom;
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -739,6 +698,8 @@ static int sr_probe(struct device *dev)
return 0;
+unregister_cdrom:
+ unregister_cdrom(&cd->cdi);
fail_minor:
spin_lock(&sr_index_lock);
clear_bit(minor, sr_index_bits);
@@ -1010,36 +971,6 @@ out_put_request:
return ret;
}
-
-/**
- * sr_kref_release - Called to free the scsi_cd structure
- * @kref: pointer to embedded kref
- *
- * sr_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_cd_get()
- * scsi_cd_put() helpers which manipulate the semaphore directly
- * and never do a direct kref_put().
- **/
-static void sr_kref_release(struct kref *kref)
-{
- struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref);
- struct gendisk *disk = cd->disk;
-
- spin_lock(&sr_index_lock);
- clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
- spin_unlock(&sr_index_lock);
-
- unregister_cdrom(&cd->cdi);
-
- disk->private_data = NULL;
-
- put_disk(disk);
-
- mutex_destroy(&cd->lock);
-
- kfree(cd);
-}
-
static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
@@ -1047,11 +978,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
- dev_set_drvdata(dev, NULL);
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- mutex_unlock(&sr_ref_mutex);
+ put_disk(cd->disk);
return 0;
}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 339c624e04d8..1175f2e213b5 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -18,8 +18,6 @@
#ifndef _SR_H
#define _SR_H
-#include <linux/genhd.h>
-#include <linux/kref.h>
#include <linux/mutex.h>
#define MAX_RETRIES 3
@@ -33,7 +31,6 @@ struct scsi_device;
typedef struct scsi_cd {
- struct scsi_driver *driver;
unsigned capacity; /* size in blocks */
struct scsi_device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
@@ -53,9 +50,6 @@ typedef struct scsi_cd {
struct cdrom_device_info cdi;
struct mutex lock;
- /* We hold gendisk and scsi_device references on probe and use
- * the refs on this kref to decide when to release them */
- struct kref kref;
struct gendisk *disk;
} Scsi_CD;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e869e90e05af..ebe9412c86f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4276,7 +4276,6 @@ static int st_probe(struct device *dev)
goto out_buffer_free;
}
kref_init(&tpnt->kref);
- tpnt->driver = &st_template;
tpnt->device = SDp;
if (SDp->scsi_level <= 2)
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index c0ef0d9aaf8a..7a68eaba7e81 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -117,7 +117,6 @@ struct scsi_tape_stats {
/* The tape drive descriptor */
struct scsi_tape {
- struct scsi_driver *driver;
struct scsi_device *device;
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8b16bbbcb806..87975d1a21c8 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!clki->name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
if (!strcmp(name, "ref_clk"))
clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -127,6 +132,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
return -ENOMEM;
vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!vreg->name)
+ return -ENOMEM;
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 460d2b440d2e..9349557b8a01 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
- break;
+ continue;
default:
WARN_ON_ONCE(true);
}
@@ -8613,7 +8613,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
* @pwr_mode: device power mode to set
*
* Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if failed to set the requested power mode
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -8667,8 +8667,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (ret > 0 && scsi_sense_valid(&sshdr))
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ if (ret > 0) {
+ if (scsi_sense_valid(&sshdr))
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ ret = -EIO;
+ }
}
if (!ret)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6a295c88d850..a7ff0e5b5494 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -142,7 +142,8 @@ static inline u32 ufshci_version(u32 major, u32 minor)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
+ CRYPTO_ENGINE_FATAL_ERROR |\
+ UIC_LINK_LOST)
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT 0x1
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 2d36a0715fca..8970068314ef 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -494,7 +494,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
if (!map_req)
return NULL;
- bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+ bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
if (!bio) {
ufshpb_put_req(hpb, map_req);
return NULL;
@@ -2050,7 +2050,7 @@ static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&pre_req->list_req);
pre_req->req = NULL;
- pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+ pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
if (!pre_req->bio)
goto release_mem;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 12c10a5e3d93..7f421600cb66 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
return;
for (i = 0; i < shadow->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
+ if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
"grant still in use by backend\n");
BUG();
}
- gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
kfree(shadow->sg);
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 72771e018c42..258894ed234b 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
}
lpc_ctrl->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_ctrl->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(lpc_ctrl->clk);
- }
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
rc = clk_prepare_enable(lpc_ctrl->clk);
if (rc) {
dev_err(dev, "couldn't enable clock\n");
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 072473a16f4d..5ed2fc1c53a0 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -28,7 +28,6 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
-static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void)
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
@@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine)
- soc_dev_attr.machine = machine;
+ if (machine) {
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine) {
+ of_node_put(root);
+ return -ENOMEM;
+ }
+ }
+ of_node_put(root);
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -195,7 +200,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
- of_node_put(root);
return 0;
}
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 4d38c80f8be8..b3c226eb5292 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
* memory mapped space.
* The BRG clock is the QE clock divided by 2.
* It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
@@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base,
for (i = 0; i < be32_to_cpu(ucode->count); i++)
iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
-
+
/* Set I-RAM Ready Register */
iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index e277c827bdf3..a5e2d0e5ab51 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -35,6 +35,8 @@ int par_io_init(struct device_node *np)
if (ret)
return ret;
par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
if (!of_property_read_u32(np, "num-ports", &num_ports))
num_par_io_ports = num_ports;
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3e59d479d001..3cb123016b3e 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
return 0;
out_clk_disable:
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ if (!domain->keep_clocks)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
}
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index 6f0a57044a7b..6aae0b12b6ff 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -53,7 +53,8 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
MT8192_AAL0_SEL_IN_CCORR0
}, {
DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 670cc82d17dc..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -411,17 +411,12 @@ out:
return ret;
}
-static int init_clks(struct platform_device *pdev, struct clk **clk)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
- for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- if (IS_ERR(clk[i]))
- return PTR_ERR(clk[i]);
- }
-
- return 0;
}
static struct scp *init_scp(struct platform_device *pdev,
@@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev,
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
struct clk *clk[CLK_MAX];
@@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev,
pd_data->num_domains = num;
- ret = init_clks(pdev, clk);
- if (ret)
- return ERR_PTR(ret);
+ init_clks(pdev, clk);
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index 25eb2c1e31bb..156ac0e0c8fe 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -34,4 +34,12 @@ config ROCKCHIP_PM_DOMAINS
If unsure, say N.
+config ROCKCHIP_DTPM
+ tristate "Rockchip DTPM hierarchy"
+ depends on DTPM && m
+ help
+ Describes the hierarchy for the Dynamic Thermal Power
+ Management tree on this platform. This creates all the
+ power-capping capable devices.
+
endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
index 875032f7344e..05f31a4e743c 100644
--- a/drivers/soc/rockchip/Makefile
+++ b/drivers/soc/rockchip/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o
obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
+obj-$(CONFIG_ROCKCHIP_DTPM) += dtpm.o
diff --git a/drivers/soc/rockchip/dtpm.c b/drivers/soc/rockchip/dtpm.c
new file mode 100644
index 000000000000..5a23784b5221
--- /dev/null
+++ b/drivers/soc/rockchip/dtpm.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * DTPM hierarchy description
+ */
+#include <linux/dtpm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static struct dtpm_node __initdata rk3399_hierarchy[] = {
+ [0]{ .name = "rk3399",
+ .type = DTPM_NODE_VIRTUAL },
+ [1]{ .name = "package",
+ .type = DTPM_NODE_VIRTUAL,
+ .parent = &rk3399_hierarchy[0] },
+ [2]{ .name = "/cpus/cpu@0",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [3]{ .name = "/cpus/cpu@1",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [4]{ .name = "/cpus/cpu@2",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [5]{ .name = "/cpus/cpu@3",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [6]{ .name = "/cpus/cpu@100",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [7]{ .name = "/cpus/cpu@101",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [8]{ .name = "/gpu@ff9a0000",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [9]{ /* sentinel */ }
+};
+
+static struct of_device_id __initdata rockchip_dtpm_match_table[] = {
+ { .compatible = "rockchip,rk3399", .data = rk3399_hierarchy },
+ {},
+};
+
+static int __init rockchip_dtpm_init(void)
+{
+ return dtpm_create_hierarchy(rockchip_dtpm_match_table);
+}
+module_init(rockchip_dtpm_init);
+
+static void __exit rockchip_dtpm_exit(void)
+{
+ return dtpm_destroy_hierarchy();
+}
+module_exit(rockchip_dtpm_exit);
+
+MODULE_SOFTDEP("pre: panfrost cpufreq-dt");
+MODULE_DESCRIPTION("Rockchip DTPM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dtpm");
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@kernel.org");
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index a9f8b224322e..02e319508cc6 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -31,7 +31,7 @@ config EXYNOS_USI
help
Enable support for USI block. USI (Universal Serial Interface) is an
IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
- ExynosAutoV0. USI block can be configured to provide one of the
+ ExynosAutoV9. USI block can be configured to provide one of the
following serial protocols: UART, SPI or High Speed I2C.
This driver allows one to configure USI for desired protocol, which
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 2746d05936d3..0fb3631e7346 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -204,7 +204,7 @@ module_platform_driver(exynos_chipid_driver);
MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b2a8821971e1..31a2cef3790c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -427,6 +427,45 @@ config SPI_INGENIC
To compile this driver as a module, choose M here: the module
will be called spi-ingenic.
+config SPI_INTEL
+ tristate
+
+config SPI_INTEL_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on PCI
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-pci.
+
+config SPI_INTEL_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-platform.
+
config SPI_JCORE
tristate "J-Core SPI Master"
depends on OF && (SUPERH || COMPILE_TEST)
@@ -866,6 +905,17 @@ config SPI_SUN6I
help
This enables using the SPI controller on the Allwinner A31 SoCs.
+config SPI_SUNPLUS_SP7021
+ tristate "Sunplus SP7021 SPI controller"
+ depends on SOC_SP7021 || COMPILE_TEST
+ help
+ This enables the Sunplus SP7021 SPI controller driver on SP7021 SoCs.
+ This driver can also be built as a module. If so, the module will be
+ called spi-sunplus-sp7021.
+
+ If you have a Sunplus SP7021 platform, say Y here.
+ If unsure, say N.
+
config SPI_SYNQUACER
tristate "Socionext's SynQuacer HighSpeed SPI controller"
depends on ARCH_SYNQUACER || COMPILE_TEST
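A minimal .config fragment showing how the new Intel entries compose, assuming an x86 build with PCI already enabled; SPI_INTEL itself has no prompt and is pulled in by the glue drivers through select:

CONFIG_SPI_MEM=y
CONFIG_SPI_INTEL_PCI=m
CONFIG_SPI_INTEL_PLATFORM=m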
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index dd7393a6046f..3aa28ed3f761 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -61,6 +61,9 @@ obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o
+obj-$(CONFIG_SPI_INTEL) += spi-intel.o
+obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
+obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
@@ -119,6 +122,7 @@ obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_SUNPLUS_SP7021) += spi-sunplus-sp7021.o
obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
obj-$(CONFIG_SPI_TEGRA210_QUAD) += spi-tegra210-quad.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 4b3ac7aceaf6..cba6a4486c24 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -12,12 +12,17 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/iopoll.h>
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
#define AMD_SPI_FIFO_CLEAR BIT(20)
#define AMD_SPI_BUSY BIT(31)
+#define AMD_SPI_OPCODE_REG 0x45
+#define AMD_SPI_CMD_TRIGGER_REG 0x47
+#define AMD_SPI_TRIGGER_CMD BIT(7)
+
#define AMD_SPI_OPCODE_MASK 0xFF
#define AMD_SPI_ALT_CS_REG 0x1D
@@ -34,10 +39,15 @@
#define AMD_SPI_XFER_TX 1
#define AMD_SPI_XFER_RX 2
+enum amd_spi_versions {
+ AMD_SPI_V1 = 1, /* AMDI0061 */
+ AMD_SPI_V2, /* AMDI0062 */
+};
+
struct amd_spi {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
- u32 rom_addr;
+ enum amd_spi_versions version;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
@@ -81,14 +91,29 @@ static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
}
+static inline void amd_spi_clear_chip(struct amd_spi *amd_spi, u8 chip_select)
+{
+ amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, chip_select & ~AMD_SPI_ALT_CS_MASK);
+}
+
static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
{
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
}
-static void amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
+static int amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
{
- amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode, AMD_SPI_OPCODE_MASK);
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode,
+ AMD_SPI_OPCODE_MASK);
+ return 0;
+ case AMD_SPI_V2:
+ amd_spi_writereg8(amd_spi, AMD_SPI_OPCODE_REG, cmd_opcode);
+ return 0;
+ default:
+ return -ENODEV;
+ }
}
static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
@@ -103,16 +128,22 @@ static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
static int amd_spi_busy_wait(struct amd_spi *amd_spi)
{
- int timeout = 100000;
-
- /* poll for SPI bus to become idle */
- while (amd_spi_readreg32(amd_spi, AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) {
- usleep_range(10, 20);
- if (timeout-- < 0)
- return -ETIMEDOUT;
+ u32 val;
+ int reg;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ reg = AMD_SPI_CTRL0_REG;
+ break;
+ case AMD_SPI_V2:
+ reg = AMD_SPI_STATUS_REG;
+ break;
+ default:
+ return -ENODEV;
}
- return 0;
+ return readl_poll_timeout(amd_spi->io_remap_addr + reg, val,
+ !(val & AMD_SPI_BUSY), 20, 2000000);
}
static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
@@ -123,10 +154,20 @@ static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
if (ret)
return ret;
- /* Set ExecuteOpCode bit in the CTRL0 register */
- amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, AMD_SPI_EXEC_CMD);
-
- return 0;
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ /* Set ExecuteOpCode bit in the CTRL0 register */
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
+ AMD_SPI_EXEC_CMD);
+ return 0;
+ case AMD_SPI_V2:
+ /* Trigger the command execution */
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_CMD_TRIGGER_REG,
+ AMD_SPI_TRIGGER_CMD, AMD_SPI_TRIGGER_CMD);
+ return 0;
+ default:
+ return -ENODEV;
+ }
}
static int amd_spi_master_setup(struct spi_device *spi)
@@ -196,6 +237,17 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
message->actual_length = tx_len + rx_len + 1;
/* complete the transaction */
message->status = 0;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ break;
+ case AMD_SPI_V2:
+ amd_spi_clear_chip(amd_spi, message->spi->chip_select);
+ break;
+ default:
+ return -ENODEV;
+ }
+
spi_finalize_current_message(master);
return 0;
@@ -241,6 +293,8 @@ static int amd_spi_probe(struct platform_device *pdev)
}
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+ amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev);
+
/* Initialize the spi_master fields */
master->bus_num = 0;
master->num_chipselect = 4;
@@ -266,7 +320,8 @@ err_free_master:
#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
- { "AMDI0061", 0 },
+ { "AMDI0061", AMD_SPI_V1 },
+ { "AMDI0062", AMD_SPI_V2 },
{},
};
MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index d1e287d2d9cd..607e7a49fb89 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/clk.h>
@@ -133,6 +134,38 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
+static int ath79_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(mem->spi);
+
+ /* Ensure that the read is performed on the device connected to hardware CS0 */
+ if (mem->spi->chip_select || mem->spi->cs_gpiod)
+ return -ENOTSUPP;
+
+ /* Only use for fast-read op. */
+ if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
+ op->addr.nbytes != 3 || op->dummy.nbytes != 1)
+ return -ENOTSUPP;
+
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+
+ memcpy_fromio(op->data.buf.in, sp->base + op->addr.val, op->data.nbytes);
+
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* restore IOC register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ath79_mem_ops = {
+ .exec_op = ath79_exec_mem_op,
+};
+
static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -154,6 +187,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = SPI_MASTER_GPIO_SS;
master->num_chipselect = 3;
+ master->mem_ops = &ath79_mem_ops;
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
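The new ath79_exec_mem_op() only accelerates a plain 1-1-1 fast read for the device wired to hardware CS0. A sketch of an operation it accepts, assuming <linux/spi/spi-mem.h> and an existing struct spi_mem handle; the helper name and values are illustrative:

static int example_fast_read(struct spi_mem *mem, u32 addr, void *buf, size_t len)
{
	/* Fast read: opcode 0x0b, 3-byte address, one dummy byte, data in */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
					  SPI_MEM_OP_ADDR(3, addr, 1),
					  SPI_MEM_OP_DUMMY(1, 1),
					  SPI_MEM_OP_DATA_IN(len, buf, 1));

	/* Operations the controller rejects fall back to regular transfers */
	return spi_mem_exec_op(mem, &op);
}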
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index c9a769b8594b..86c76211b3d3 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -585,7 +585,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
u32 rd = 0;
u32 wr = 0;
- if (qspi->base[CHIP_SELECT]) {
+ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
wr = (rd & ~0xff) | (1 << cs);
if (rd == wr)
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7d709a8c833b..e28521922330 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -445,25 +444,12 @@ static void bcm2835aux_spi_handle_err(struct spi_master *master,
static int bcm2835aux_spi_setup(struct spi_device *spi)
{
- int ret;
-
/* sanity check for native cs */
if (spi->mode & SPI_NO_CS)
return 0;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* with gpio-cs set the GPIO to the correct level
- * and as output (in case the dt has the gpio not configured
- * as output but native cs)
- */
- ret = gpio_direction_output(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ? 0 : 1);
- if (ret)
- dev_err(&spi->dev,
- "could not set gpio %i as output: %i\n",
- spi->cs_gpio, ret);
-
- return ret;
- }
+
+ if (spi->cs_gpiod)
+ return 0;
/* for dt-backwards compatibility: only support native on CS0
* known things not supported with broken native CS:
@@ -519,6 +505,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
master->prepare_message = bcm2835aux_spi_prepare_message;
master->unprepare_message = bcm2835aux_spi_unprepare_message;
master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index ae61d72c7d28..267342dfa738 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -41,6 +41,8 @@
* chips need ... there may be several reasons you'd need to tweak timings
* in these routines, not just to make it faster or slower to match a
* particular CPU clock rate.
+ *
+ * ToDo: Maybe the bitrev macros can be used to improve the code?
*/
static inline u32
@@ -106,3 +108,67 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
}
return word;
}
+
+static inline u32
+bitbang_txrx_le_cpha0(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on trailing edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on leading edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << (bits - 1);
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_le_cpha1(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on trailing edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << (bits - 1);
+ }
+ return word;
+}
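A worked example of the difference: for word = 0b1011 and bits = 4, the new LSB-first helpers clock out 1, 1, 0, 1 (bit 0 first) and reassemble the incoming word in the same order, whereas the existing big-endian variants above send 1, 0, 1, 1 (bit 3 first).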
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 4bc1b93fc276..3ab19be83095 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -578,10 +578,8 @@ static int cdns_xspi_probe(struct platform_device *pdev)
}
cdns_xspi->irq = platform_get_irq(pdev, 0);
- if (cdns_xspi->irq < 0) {
- dev_err(dev, "Failed to get IRQ\n");
+ if (cdns_xspi->irq < 0)
return -ENXIO;
- }
ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
IRQF_SHARED, pdev->name, cdns_xspi);
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index b6c7467f0b59..d403a7a3021d 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -25,6 +25,7 @@
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_INIT_TIMEOUT_MS 1000
+#define SPI_FSI_STATUS_TIMEOUT_MS 100
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
@@ -299,6 +300,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
int rc = 0;
+ unsigned long end;
u64 status = 0ULL;
if (transfer->tx_buf) {
@@ -315,10 +317,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
if (rc)
return rc;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
@@ -329,10 +335,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u8 *rx = transfer->rx_buf;
while (transfer->len > recv) {
+ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index f7d905d2a90f..4e83cc5b445d 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -898,11 +898,8 @@ static int spi_geni_probe(struct platform_device *pdev)
return irq;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- return dev_err_probe(dev, ret, "could not set DMA mask\n");
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0584f4d2fde2..4b12c4964a66 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -135,25 +135,37 @@ static inline int getmiso(const struct spi_device *spi)
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*
@@ -170,28 +182,40 @@ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*----------------------------------------------------------------------*/
@@ -378,7 +402,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
- SPI_CS_HIGH;
+ SPI_CS_HIGH | SPI_LSB_FIRST;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
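With SPI_LSB_FIRST now advertised in mode_bits, a client sitting on a spi-gpio bus can request LSB-first wire order at setup time. A minimal sketch; the function name is purely illustrative:

static int example_client_probe(struct spi_device *spi)
{
	/* use the LSB-first word order added above */
	spi->mode |= SPI_LSB_FIRST;
	return spi_setup(spi);
}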
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/spi/spi-intel-pci.c
index 1bc53b8bb88a..a5ef7a526a7f 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -2,34 +2,48 @@
/*
* Intel PCH/PCU SPI flash PCI driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/ioport.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "intel-spi.h"
+#include "spi-intel.h"
#define BCR 0xdc
#define BCR_WPD BIT(0)
+static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ /* Try to make the chip read/write */
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static const struct intel_spi_boardinfo cnl_info = {
.type = INTEL_SPI_CNL,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
- u32 bcr;
int ret;
ret = pcim_enable_device(pdev);
@@ -41,26 +55,8 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
if (!info)
return -ENOMEM;
- /* Try to make the chip read/write */
- pci_read_config_dword(pdev, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
- }
- info->writeable = !!(bcr & BCR_WPD);
-
- ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- pci_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static void intel_spi_pci_remove(struct pci_dev *pdev)
-{
- intel_spi_remove(pci_get_drvdata(pdev));
+ info->data = pdev;
+ return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
}
static const struct pci_device_id intel_spi_pci_ids[] = {
@@ -70,6 +66,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
@@ -89,7 +86,6 @@ static struct pci_driver intel_spi_pci_driver = {
.name = "intel-spi",
.id_table = intel_spi_pci_ids,
.probe = intel_spi_pci_probe,
- .remove = intel_spi_pci_remove,
};
module_pci_driver(intel_spi_pci_driver);
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/spi/spi-intel-platform.c
index f80f1086f928..2ef09fa35661 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
+++ b/drivers/spi/spi-intel-platform.c
@@ -2,20 +2,18 @@
/*
* Intel PCH/PCU SPI flash platform driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "intel-spi.h"
+#include "spi-intel.h"
static int intel_spi_platform_probe(struct platform_device *pdev)
{
struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
struct resource *mem;
info = dev_get_platdata(&pdev->dev);
@@ -23,24 +21,11 @@ static int intel_spi_platform_probe(struct platform_device *pdev)
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ispi = intel_spi_probe(&pdev->dev, mem, info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- platform_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static int intel_spi_platform_remove(struct platform_device *pdev)
-{
- struct intel_spi *ispi = platform_get_drvdata(pdev);
-
- return intel_spi_remove(ispi);
+ return intel_spi_probe(&pdev->dev, mem, info);
}
static struct platform_driver intel_spi_platform_driver = {
.probe = intel_spi_platform_probe,
- .remove = intel_spi_platform_remove,
.driver = {
.name = "intel-spi",
},
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/spi/spi-intel.c
index a413892ff449..e937cfe85559 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi.c
+++ b/drivers/spi/spi-intel.c
@@ -2,21 +2,21 @@
/*
* Intel PCH/PCU SPI flash driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/err.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sizes.h>
-#include <linux/mtd/mtd.h>
+
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
-#include "intel-spi.h"
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include "spi-intel.h"
/* Offsets are from @ispi->base */
#define BFPREG 0x00
@@ -92,8 +92,6 @@
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
-#define BYT_BCR 0xfc
-#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
#define BYT_PR_NUM 5
@@ -125,37 +123,43 @@
* struct intel_spi - Driver private data
* @dev: Device pointer
* @info: Pointer to board specific info
- * @nor: SPI NOR layer structure
* @base: Beginning of MMIO space
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
+ * @master: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
- * @writeable: Is the chip writeable
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
- * @erase_64k: 64k erase supported
* @atomic_preopcode: Holds preopcode when atomic sequence is requested
* @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
+ * @mem_ops: Pointer to SPI MEM ops supported by the controller
*/
struct intel_spi {
struct device *dev;
const struct intel_spi_boardinfo *info;
- struct spi_nor nor;
void __iomem *base;
void __iomem *pregs;
void __iomem *sregs;
+ struct spi_controller *master;
size_t nregions;
size_t pr_num;
- bool writeable;
bool locked;
bool swseq_reg;
bool swseq_erase;
- bool erase_64k;
u8 atomic_preopcode;
u8 opcodes[8];
+ const struct intel_spi_mem_op *mem_ops;
+};
+
+struct intel_spi_mem_op {
+ struct spi_mem_op mem_op;
+ u32 replacement_op;
+ int (*exec_op)(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op);
};
static bool writeable;
@@ -201,9 +205,6 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
readl(ispi->sregs + OPMENU1));
}
- if (ispi->info->type == INTEL_SPI_BYT)
- dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
-
dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
@@ -219,9 +220,8 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
base = value & PR_BASE_MASK;
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
- i, base << 12, (limit << 12) | 0xfff,
- value & PR_WPE ? 'W' : '.',
- value & PR_RPE ? 'R' : '.');
+ i, base << 12, (limit << 12) | 0xfff,
+ value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
}
dev_dbg(ispi->dev, "Flash regions:\n");
@@ -236,7 +236,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
dev_dbg(ispi->dev, " %02d disabled\n", i);
else
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
- i, base << 12, (limit << 12) | 0xfff);
+ i, base << 12, (limit << 12) | 0xfff);
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
@@ -304,124 +304,12 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
INTEL_SPI_TIMEOUT * 1000);
}
-static int intel_spi_init(struct intel_spi *ispi)
+static bool intel_spi_set_writeable(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, lvscc, uvscc, val;
- int i;
-
- switch (ispi->info->type) {
- case INTEL_SPI_BYT:
- ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BYT_PR;
- ispi->nregions = BYT_FREG_NUM;
- ispi->pr_num = BYT_PR_NUM;
- ispi->swseq_reg = true;
-
- if (writeable) {
- /* Disable write protection */
- val = readl(ispi->base + BYT_BCR);
- if (!(val & BYT_BCR_WPD)) {
- val |= BYT_BCR_WPD;
- writel(val, ispi->base + BYT_BCR);
- val = readl(ispi->base + BYT_BCR);
- }
-
- ispi->writeable = !!(val & BYT_BCR_WPD);
- }
-
- break;
-
- case INTEL_SPI_LPT:
- ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
- ispi->pregs = ispi->base + LPT_PR;
- ispi->nregions = LPT_FREG_NUM;
- ispi->pr_num = LPT_PR_NUM;
- ispi->swseq_reg = true;
- break;
-
- case INTEL_SPI_BXT:
- ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BXT_PR;
- ispi->nregions = BXT_FREG_NUM;
- ispi->pr_num = BXT_PR_NUM;
- ispi->erase_64k = true;
- break;
-
- case INTEL_SPI_CNL:
- ispi->sregs = NULL;
- ispi->pregs = ispi->base + CNL_PR;
- ispi->nregions = CNL_FREG_NUM;
- ispi->pr_num = CNL_PR_NUM;
- break;
+ if (!ispi->info->set_writeable)
+ return false;
- default:
- return -EINVAL;
- }
-
- /* Disable #SMI generation from HW sequencer */
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~HSFSTS_CTL_FSMIE;
- writel(val, ispi->base + HSFSTS_CTL);
-
- /*
- * Determine whether erase operation should use HW or SW sequencer.
- *
- * The HW sequencer has a predefined list of opcodes, with only the
- * erase opcode being programmable in LVSCC and UVSCC registers.
- * If these registers don't contain a valid erase opcode, erase
- * cannot be done using HW sequencer.
- */
- lvscc = readl(ispi->base + LVSCC);
- uvscc = readl(ispi->base + UVSCC);
- if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
- ispi->swseq_erase = true;
- /* SPI controller on Intel BXT supports 64K erase opcode */
- if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
- if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
- !(uvscc & ERASE_64K_OPCODE_MASK))
- ispi->erase_64k = false;
-
- if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
- dev_err(ispi->dev, "software sequencer not supported, but required\n");
- return -EINVAL;
- }
-
- /*
- * Some controllers can only do basic operations using hardware
- * sequencer. All other operations are supposed to be carried out
- * using software sequencer.
- */
- if (ispi->swseq_reg) {
- /* Disable #SMI generation from SW sequencer */
- val = readl(ispi->sregs + SSFSTS_CTL);
- val &= ~SSFSTS_CTL_FSMIE;
- writel(val, ispi->sregs + SSFSTS_CTL);
- }
-
- /* Check controller's lock status */
- val = readl(ispi->base + HSFSTS_CTL);
- ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
-
- if (ispi->locked && ispi->sregs) {
- /*
- * BIOS programs allowed opcodes and then locks down the
- * register. So read back what opcodes it decided to support.
- * That's the set we are going to support as well.
- */
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
-
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
- }
- }
-
- intel_spi_dump_regs(ispi);
-
- return 0;
+ return ispi->info->set_writeable(ispi->base, ispi->info->data);
}
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
@@ -537,7 +425,6 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
default:
return -EINVAL;
}
-
}
writel(val, ispi->sregs + SSFSTS_CTL);
@@ -554,31 +441,35 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
return 0;
}
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- size_t len)
+static int intel_spi_read_reg(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
int ret;
/* Address of the first chip */
writel(0, ispi->base + FADDR);
if (ispi->swseq_reg)
- ret = intel_spi_sw_cycle(ispi, opcode, len,
+ ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
if (ret)
return ret;
- return intel_spi_read_block(ispi, buf, len);
+ return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
- size_t len)
+static int intel_spi_write_reg(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
int ret;
/*
@@ -623,23 +514,25 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
writel(0, ispi->base + FADDR);
/* Write the value beforehand */
- ret = intel_spi_write_block(ispi, buf, len);
+ ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
if (ret)
return ret;
if (ispi->swseq_reg)
- return intel_spi_sw_cycle(ispi, opcode, len,
+ return intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_WRITE_NO_ADDR);
- return intel_spi_hw_cycle(ispi, opcode, len);
+ return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
-static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
- u_char *read_buf)
+static int intel_spi_read(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
+ void *read_buf = op->data.buf.in;
+ size_t block_size, nbytes = op->data.nbytes;
+ u32 addr = op->addr.val;
u32 val, status;
- ssize_t ret;
+ int ret;
/*
* Atomic sequence is not expected with HW sequencer reads. Make
@@ -648,24 +541,14 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
if (WARN_ON_ONCE(ispi->atomic_preopcode))
ispi->atomic_preopcode = 0;
- switch (nor->read_opcode) {
- case SPINOR_OP_READ:
- case SPINOR_OP_READ_FAST:
- case SPINOR_OP_READ_4B:
- case SPINOR_OP_READ_FAST_4B:
- break;
- default:
- return -EINVAL;
- }
-
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Read cannot cross 4K boundary */
- block_size = min_t(loff_t, from + block_size,
- round_up(from + 1, SZ_4K)) - from;
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
- writel(from, ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
@@ -686,8 +569,7 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
ret = -EACCES;
if (ret < 0) {
- dev_err(ispi->dev, "read error: %llx: %#x\n", from,
- status);
+ dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
return ret;
}
@@ -695,34 +577,35 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
if (ret)
return ret;
- len -= block_size;
- from += block_size;
- retlen += block_size;
+ nbytes -= block_size;
+ addr += block_size;
read_buf += block_size;
}
- return retlen;
+ return 0;
}
-static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *write_buf)
+static int intel_spi_write(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
+ size_t block_size, nbytes = op->data.nbytes;
+ const void *write_buf = op->data.buf.out;
+ u32 addr = op->addr.val;
u32 val, status;
- ssize_t ret;
+ int ret;
/* Not needed with HW sequencer write, make sure it is cleared */
ispi->atomic_preopcode = 0;
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Write cannot cross 4K boundary */
- block_size = min_t(loff_t, to + block_size,
- round_up(to + 1, SZ_4K)) - to;
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
- writel(to, ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
@@ -753,79 +636,476 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
ret = -EACCES;
if (ret < 0) {
- dev_err(ispi->dev, "write error: %llx: %#x\n", to,
- status);
+ dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
return ret;
}
- len -= block_size;
- to += block_size;
- retlen += block_size;
+ nbytes -= block_size;
+ addr += block_size;
write_buf += block_size;
}
- return retlen;
+ return 0;
}
-static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+static int intel_spi_erase(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- size_t erase_size, len = nor->mtd.erasesize;
- struct intel_spi *ispi = nor->priv;
- u32 val, status, cmd;
+ u8 opcode = op->cmd.opcode;
+ u32 addr = op->addr.val;
+ u32 val, status;
int ret;
- /* If the hardware can do 64k erase use that when possible */
- if (len >= SZ_64K && ispi->erase_64k) {
- cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
- erase_size = SZ_64K;
- } else {
- cmd = HSFSTS_CTL_FCYCLE_ERASE;
- erase_size = SZ_4K;
+ writel(addr, ispi->base + FADDR);
+
+ if (ispi->swseq_erase)
+ return intel_spi_sw_cycle(ispi, opcode, 0,
+ OPTYPE_WRITE_WITH_ADDR);
+
+ /* Not needed with HW sequencer erase, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ val |= iop->replacement_op;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
+ iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
+ iop->mem_op.cmd.dtr != op->cmd.dtr ||
+ iop->mem_op.cmd.opcode != op->cmd.opcode)
+ return false;
+
+ if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
+ iop->mem_op.addr.dtr != op->addr.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != op->data.dir ||
+ iop->mem_op.data.dtr != op->data.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
+ if (iop->mem_op.data.buswidth != op->data.buswidth)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct intel_spi_mem_op *
+intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
+{
+ const struct intel_spi_mem_op *iop;
+
+ for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
+ if (intel_spi_cmp_mem_op(iop, op))
+ break;
}
- if (ispi->swseq_erase) {
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
+ return iop->mem_op.cmd.opcode ? iop : NULL;
+}
+
+static bool intel_spi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop) {
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
+ }
- ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
- 0, OPTYPE_WRITE_WITH_ADDR);
- if (ret)
- return ret;
+ /*
+ * For software sequencer check that the opcode is actually
+ * present in the opmenu if it is locked.
+ */
+ if (ispi->swseq_reg && ispi->locked) {
+ int i;
- offs += erase_size;
- len -= erase_size;
+ /* Check if it is in the locked opcodes list */
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
+ if (ispi->opcodes[i] == op->cmd.opcode)
+ return true;
}
- return 0;
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
}
- /* Not needed with HW sequencer erase, make sure it is cleared */
- ispi->atomic_preopcode = 0;
+ return true;
+}
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
+static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
- val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= cmd;
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop)
+ return -EOPNOTSUPP;
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret)
- return ret;
+ return iop->exec_op(ispi, iop, op);
+}
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- return -EIO;
- else if (status & HSFSTS_CTL_AEL)
- return -EACCES;
+static const char *intel_spi_get_name(struct spi_mem *mem)
+{
+ const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+
+ /*
+ * Return name of the flash controller device to be compatible
+ * with the MTD version.
+ */
+ return dev_name(ispi->dev);
+}
+
+static const struct spi_controller_mem_ops intel_spi_mem_ops = {
+ .supports_op = intel_spi_supports_mem_op,
+ .exec_op = intel_spi_exec_mem_op,
+ .get_name = intel_spi_get_name,
+};
+
+#define INTEL_SPI_OP_ADDR(__nbytes) \
+ { \
+ .nbytes = __nbytes, \
+ }
+
+#define INTEL_SPI_OP_NO_DATA \
+ { \
+ .dir = SPI_MEM_NO_DATA, \
+ }
+
+#define INTEL_SPI_OP_DATA_IN(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_IN, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_OP_DATA_OUT(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_OUT, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ }
+
+#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ .replacement_op = __repl, \
+ }
+
+/*
+ * The controller handles pretty much everything internally based on the
+ * SFDP data but we want to make sure we only support the operations
+ * actually possible. Only check buswidth and transfer direction, the
+ * core validates data.
+ */
+#define INTEL_SPI_GENERIC_OPS \
+ /* Status register operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write_reg), \
+ /* Normal read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Write operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ /* Erase operations */ \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE) \
+
+static const struct intel_spi_mem_op generic_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ { },
+};
+
+static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ /* 64k sector erase operations */
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(3),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ { },
+};
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
+ bool erase_64k = false;
+ int i;
+
+ switch (ispi->info->type) {
+ case INTEL_SPI_BYT:
+ ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BYT_PR;
+ ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_LPT:
+ ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + LPT_PR;
+ ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_BXT:
+ ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BXT_PR;
+ ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
+ erase_64k = true;
+ break;
+
+ case INTEL_SPI_CNL:
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Try to disable write protection if user asked to do so */
+ if (writeable && !intel_spi_set_writeable(ispi)) {
+ dev_warn(ispi->dev, "can't disable chip write protection\n");
+ writeable = false;
+ }
+
+ /* Disable #SMI generation from HW sequencer */
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~HSFSTS_CTL_FSMIE;
+ writel(val, ispi->base + HSFSTS_CTL);
- offs += erase_size;
- len -= erase_size;
+ /*
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
+ */
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ erase_64k = false;
+
+ if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Some controllers can only do basic operations using hardware
+ * sequencer. All other operations are supposed to be carried out
+ * using software sequencer.
+ */
+ if (ispi->swseq_reg) {
+ /* Disable #SMI generation from SW sequencer */
+ val = readl(ispi->sregs + SSFSTS_CTL);
+ val &= ~SSFSTS_CTL_FSMIE;
+ writel(val, ispi->sregs + SSFSTS_CTL);
}
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked && ispi->sregs) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
+
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+ }
+ }
+
+ if (erase_64k) {
+ dev_dbg(ispi->dev, "Using erase_64k memory operations");
+ ispi->mem_ops = erase_64k_mem_ops;
+ } else {
+ dev_dbg(ispi->dev, "Using generic memory operations");
+ ispi->mem_ops = generic_mem_ops;
+ }
+
+ intel_spi_dump_regs(ispi);
return 0;
}
@@ -884,9 +1164,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
/*
* If any of the regions have protection bits set, make the
* whole partition read-only to be on the safe side.
+ *
+ * Also mask the bit if the user did not ask for the chip
+ * to be writeable.
*/
- if (intel_spi_is_protected(ispi, base, limit))
- ispi->writeable = false;
+ if (!writeable || intel_spi_is_protected(ispi, base, limit))
+ part->mask_flags |= MTD_WRITEABLE;
end = (limit << 12) + 4096;
if (end > part->size)
@@ -894,75 +1177,74 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
-static const struct spi_nor_controller_ops intel_spi_controller_ops = {
- .read_reg = intel_spi_read_reg,
- .write_reg = intel_spi_write_reg,
- .read = intel_spi_read,
- .write = intel_spi_write,
- .erase = intel_spi_erase,
-};
+static int intel_spi_populate_chip(struct intel_spi *ispi)
+{
+ struct flash_platform_data *pdata;
+ struct spi_board_info chip;
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info)
+ pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->nr_parts = 1;
+ pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts),
+ pdata->nr_parts, GFP_KERNEL);
+ if (!pdata->parts)
+ return -ENOMEM;
+
+ intel_spi_fill_partition(ispi, pdata->parts);
+
+ memset(&chip, 0, sizeof(chip));
+ snprintf(chip.modalias, 8, "spi-nor");
+ chip.platform_data = pdata;
+
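+	/* Register a single "spi-nor" flash device using the partition filled above */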
+ return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
+}
+
+/**
+ * intel_spi_probe() - Probe the Intel SPI flash controller
+ * @dev: Pointer to the parent device
+ * @mem: MMIO resource
+ * @info: Platform specific information
+ *
+ * Probes the Intel SPI flash controller and creates the flash chip device.
+ * Returns %0 on success and negative errno in case of failure.
+ */
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_PP,
- };
- struct mtd_partition part;
+ struct spi_controller *master;
struct intel_spi *ispi;
int ret;
- if (!info || !mem)
- return ERR_PTR(-EINVAL);
+ master = devm_spi_alloc_master(dev, sizeof(*ispi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mem_ops = &intel_spi_mem_ops;
- ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
- if (!ispi)
- return ERR_PTR(-ENOMEM);
+ ispi = spi_master_get_devdata(master);
ispi->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(ispi->base))
- return ERR_CAST(ispi->base);
+ return PTR_ERR(ispi->base);
ispi->dev = dev;
+ ispi->master = master;
ispi->info = info;
- ispi->writeable = info->writeable;
ret = intel_spi_init(ispi);
if (ret)
- return ERR_PTR(ret);
-
- ispi->nor.dev = ispi->dev;
- ispi->nor.priv = ispi;
- ispi->nor.controller_ops = &intel_spi_controller_ops;
-
- ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
- if (ret) {
- dev_info(dev, "failed to locate the chip\n");
- return ERR_PTR(ret);
- }
-
- intel_spi_fill_partition(ispi, &part);
-
- /* Prevent writes if not explicitly enabled */
- if (!ispi->writeable || !writeable)
- ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+ return ret;
- ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
+ ret = devm_spi_register_master(dev, master);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- return ispi;
+ return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
-int intel_spi_remove(struct intel_spi *ispi)
-{
- return mtd_device_unregister(&ispi->nor.mtd);
-}
-EXPORT_SYMBOL_GPL(intel_spi_remove);
-
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h
new file mode 100644
index 000000000000..a4f0327a46ff
--- /dev/null
+++ b/drivers/spi/spi-intel.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef SPI_INTEL_H
+#define SPI_INTEL_H
+
+#include <linux/platform_data/x86/spi-intel.h>
+
+struct resource;
+
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info);
+
+#endif /* SPI_INTEL_H */
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index bcb52601804a..aae26f62ea87 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -906,17 +906,11 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
struct spi_master *master;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
- const struct of_device_id *match;
u32 id, supports_dma, revision;
unsigned int num_cs;
int err;
- match = of_match_device(lantiq_ssc_match, dev);
- if (!match) {
- dev_err(dev, "no device match\n");
- return -EINVAL;
- }
- hwcfg = match->data;
+ hwcfg = of_device_get_match_data(dev);
master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
if (!master)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 37f4443ce9a0..e9d83d65873b 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -854,15 +854,13 @@ static int spi_mem_probe(struct spi_device *spi)
return memdrv->probe(mem);
}
-static int spi_mem_remove(struct spi_device *spi)
+static void spi_mem_remove(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->remove)
- return memdrv->remove(mem);
-
- return 0;
+ memdrv->remove(mem);
}
static void spi_mem_shutdown(struct spi_device *spi)
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index c208efeadd18..0bc7daa7afc8 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
writel_relaxed(0, spicc->base + SPICC_INTREG);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
0, NULL, spicc);
if (ret) {
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 78a9bca8cc68..03630359ce70 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
-#include <linux/gpio.h>
#include <asm/mpc52xx_psc.h>
enum {
@@ -128,17 +127,28 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
out_be32(psc_addr(mps, ccr), ccr);
mps->bits_per_word = cs->bits_per_word;
- if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
- mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 1);
+ }
}
static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
- mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
-
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 0);
+ }
}
/* extract and scale size field in txsz or rxsz */
@@ -363,7 +373,6 @@ static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
static int mpc512x_psc_spi_setup(struct spi_device *spi)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
- int ret;
if (spi->bits_per_word % 8)
return -EINVAL;
@@ -373,18 +382,6 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "can't get CS gpio: %d\n",
- ret);
- kfree(cs);
- return ret;
- }
- gpio_direction_output(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
-
spi->controller_state = cs;
}
@@ -396,8 +393,6 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
kfree(spi->controller_state);
}
@@ -476,11 +471,6 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
-{
- gpio_set_value(spi->cs_gpio, onoff);
-}
-
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq)
{
@@ -500,9 +490,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->type = (int)of_device_get_match_data(dev);
mps->irq = irq;
- if (pdata == NULL) {
- mps->cs_control = mpc512x_spi_cs_control;
- } else {
+ if (pdata) {
mps->cs_control = pdata->cs_control;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
@@ -513,6 +501,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
+ master->use_gpio_descriptors = true;
master->cleanup = mpc512x_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a15de10ee286..1a0b3208dfca 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -12,7 +12,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
@@ -31,6 +31,7 @@
#define SPI_CFG2_REG 0x0028
#define SPI_TX_SRC_REG_64 0x002c
#define SPI_RX_DST_REG_64 0x0030
+#define SPI_CFG3_IPM_REG 0x0040
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
@@ -43,11 +44,15 @@
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
+
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16
@@ -68,7 +73,13 @@
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
+#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
#define MT8173_SPI_MAX_PAD_SEL 3
#define MTK_SPI_PAUSE_INT_STATUS 0x2
@@ -78,6 +89,7 @@
#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
#define MTK_SPI_32BITS_MASK (0xffffffff)
#define DMA_ADDR_EXT_BITS (36)
@@ -93,6 +105,9 @@ struct mtk_spi_compatible {
bool dma_ext;
/* some IC no need unprepare SPI clk */
bool no_need_unprepare;
+	/* IPM design adjusts and extends the registers to support more features */
+ bool ipm_design;
+
};
struct mtk_spi {
@@ -116,6 +131,12 @@ static const struct mtk_spi_compatible mt2712_compat = {
.must_tx = true,
};
+static const struct mtk_spi_compatible mtk_ipm_compat = {
+ .enhance_timing = true,
+ .dma_ext = true,
+ .ipm_design = true,
+};
+
static const struct mtk_spi_compatible mt6765_compat = {
.need_pad_sel = true,
.must_tx = true,
@@ -157,6 +178,9 @@ static const struct mtk_chip_config mtk_default_chip_info = {
};
static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,spi-ipm",
+ .data = (void *)&mtk_ipm_compat,
+ },
{ .compatible = "mediatek,mt2701-spi",
.data = (void *)&mtk_common_compat,
},
@@ -275,12 +299,11 @@ static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
return 0;
}
-static int mtk_spi_prepare_message(struct spi_master *master,
- struct spi_message *msg)
+static int mtk_spi_hw_init(struct spi_master *master,
+ struct spi_device *spi)
{
u16 cpha, cpol;
u32 reg_val;
- struct spi_device *spi = msg->spi;
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_master_get_devdata(master);
@@ -288,6 +311,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->dev_comp->ipm_design) {
+ /* SPI transfer without idle time until packet length done */
+ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+ if (spi->mode & SPI_LOOP)
+ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+ else
+ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+ }
+
if (cpha)
reg_val |= SPI_CMD_CPHA;
else
@@ -345,17 +377,39 @@ static int mtk_spi_prepare_message(struct spi_master *master,
mdata->base + SPI_PAD_SEL_REG);
/* tick delay */
- reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
- reg_val |= ((chip_config->tick_delay & 0x7)
- << SPI_CFG1_GET_TICK_DLY_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG1_REG);
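+	/* IPM designs program the tick delay in SPI_CMD, other designs in SPI_CFG1 */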
+ if (mdata->dev_comp->enhance_timing) {
+ if (mdata->dev_comp->ipm_design) {
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
+ reg_val |= ((chip_config->tick_delay & 0x3)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
/* set hw cs timing */
mtk_spi_set_hw_cs_timing(spi);
return 0;
}
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return mtk_spi_hw_init(master, msg->spi);
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -377,13 +431,13 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
}
static void mtk_spi_prepare_transfer(struct spi_master *master,
- struct spi_transfer *xfer)
+ u32 speed_hz)
{
u32 div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (xfer->speed_hz < mdata->spi_clk_hz / 2)
- div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
+ if (speed_hz < mdata->spi_clk_hz / 2)
+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
else
div = 1;
@@ -414,12 +468,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
u32 packet_size, packet_loop, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+ if (mdata->dev_comp->ipm_design)
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_IPM_PACKET_SIZE);
+ else
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_PACKET_SIZE);
+
packet_loop = mdata->xfer_len / packet_size;
reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+ if (mdata->dev_comp->ipm_design)
+ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+ else
+ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
@@ -514,7 +580,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
mtk_spi_setup_packet(master);
if (xfer->tx_buf) {
@@ -547,7 +613,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
cmd = readl(mdata->base + SPI_CMD_REG);
if (xfer->tx_buf)
@@ -582,6 +648,19 @@ static int mtk_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u32 reg_val = 0;
+
+ /* prepare xfer direction and duplex mode */
+ if (mdata->dev_comp->ipm_design) {
+ if (!xfer->tx_buf || !xfer->rx_buf) {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
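+			/* The DIR bit selects the RX direction for a half-duplex read */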
+ if (xfer->rx_buf)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ }
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+ }
+
if (master->can_dma(master, spi, xfer))
return mtk_spi_dma_transfer(master, spi, xfer);
else
@@ -605,8 +684,9 @@ static int mtk_spi_setup(struct spi_device *spi)
if (!spi->controller_data)
spi->controller_data = (void *)&mtk_default_chip_info;
- if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
+ /* CS de-asserted, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
return 0;
}
@@ -624,7 +704,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
else
mdata->state = MTK_SPI_IDLE;
- if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+ if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
@@ -730,6 +810,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->can_dma = mtk_spi_can_dma;
master->setup = mtk_spi_setup;
master->set_cs_timing = mtk_spi_set_hw_cs_timing;
+ master->use_gpio_descriptors = true;
of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
if (!of_id) {
@@ -746,6 +827,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
+ if (mdata->dev_comp->ipm_design)
+ master->mode_bits |= SPI_LOOP;
if (mdata->dev_comp->need_pad_sel) {
mdata->pad_num = of_property_count_u32_elems(
@@ -853,25 +936,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_disable_runtime_pm;
}
- if (!master->cs_gpios && master->num_chipselect > 1) {
+ if (!master->cs_gpiods && master->num_chipselect > 1) {
dev_err(&pdev->dev,
"cs_gpios not specified and num_chipselect > 1\n");
ret = -EINVAL;
goto err_disable_runtime_pm;
}
-
- if (master->cs_gpios) {
- for (i = 0; i < master->num_chipselect; i++) {
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i],
- dev_name(&pdev->dev));
- if (ret) {
- dev_err(&pdev->dev,
- "can't get CS GPIO %i\n", i);
- goto err_disable_runtime_pm;
- }
- }
- }
}
if (mdata->dev_comp->dma_ext)
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 5c93730615f8..94fb09696677 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -95,6 +95,17 @@
#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+struct mtk_nor_caps {
+ u8 dma_bits;
+
+	/* extra_dummy_bit is added for the IP in newer SoCs.
+	 * Some new SoCs change the timing of fetching register values
+	 * and NOR flash IDs, so they need an extra dummy bit that adds
+	 * more clock cycles for fetching data.
+ */
+ u8 extra_dummy_bit;
+};
+
struct mtk_nor {
struct spi_controller *ctlr;
struct device *dev;
@@ -104,11 +115,13 @@ struct mtk_nor {
struct clk *spi_clk;
struct clk *ctlr_clk;
struct clk *axi_clk;
+ struct clk *axi_s_clk;
unsigned int spi_freq;
bool wbuf_en;
bool has_irq;
bool high_dma;
struct completion op_done;
+ const struct mtk_nor_caps *caps;
};
static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
@@ -554,7 +567,12 @@ static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
}
// trigger op
- writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+ if (rx_len)
+ writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
+ sp->base + MTK_NOR_REG_PRG_CNT);
+ else
+ writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
prg_len * BITS_PER_BYTE);
if (ret)
@@ -674,6 +692,7 @@ static void mtk_nor_disable_clk(struct mtk_nor *sp)
clk_disable_unprepare(sp->spi_clk);
clk_disable_unprepare(sp->ctlr_clk);
clk_disable_unprepare(sp->axi_clk);
+ clk_disable_unprepare(sp->axi_s_clk);
}
static int mtk_nor_enable_clk(struct mtk_nor *sp)
@@ -697,6 +716,14 @@ static int mtk_nor_enable_clk(struct mtk_nor *sp)
return ret;
}
+ ret = clk_prepare_enable(sp->axi_s_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+ clk_disable_unprepare(sp->axi_clk);
+ return ret;
+ }
+
return 0;
}
@@ -743,9 +770,25 @@ static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
.exec_op = mtk_nor_exec_op
};
+static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 0,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 1,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
+ .dma_bits = 36,
+ .extra_dummy_bit = 0,
+};
+
static const struct of_device_id mtk_nor_match[] = {
- { .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
- { .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
+ { .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
+ { .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
+ { .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
@@ -754,10 +797,10 @@ static int mtk_nor_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_nor *sp;
+ struct mtk_nor_caps *caps;
void __iomem *base;
- struct clk *spi_clk, *ctlr_clk, *axi_clk;
+ struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
int ret, irq;
- unsigned long dma_bits;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -775,10 +818,16 @@ static int mtk_nor_probe(struct platform_device *pdev)
if (IS_ERR(axi_clk))
return PTR_ERR(axi_clk);
- dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
- dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
- return -EINVAL;
+ axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
+ if (IS_ERR(axi_s_clk))
+ return PTR_ERR(axi_s_clk);
+
+ caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
+ return ret;
}
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
@@ -808,7 +857,9 @@ static int mtk_nor_probe(struct platform_device *pdev)
sp->spi_clk = spi_clk;
sp->ctlr_clk = ctlr_clk;
sp->axi_clk = axi_clk;
- sp->high_dma = (dma_bits > 32);
+ sp->axi_s_clk = axi_s_clk;
+ sp->caps = caps;
+ sp->high_dma = caps->dma_bits > 32;
sp->buffer = dmam_alloc_coherent(&pdev->dev,
MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
&sp->buffer_dma, GFP_KERNEL);
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index b62471ab6d7f..ba67dbed9fb8 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -201,7 +201,7 @@ struct fiu_data {
int fiu_max;
};
-static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
+static const struct npcm_fiu_info npcm7xx_fiu_info[] = {
{.name = "FIU0", .fiu_id = FIU0,
.max_map_size = MAP_SIZE_128MB, .max_cs = 2},
{.name = "FIU3", .fiu_id = FIU3,
@@ -209,8 +209,8 @@ static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
{.name = "FIUX", .fiu_id = FIUX,
.max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
-static const struct fiu_data npxm7xx_fiu_data = {
- .npcm_fiu_data_info = npxm7xx_fiu_info,
+static const struct fiu_data npcm7xx_fiu_data = {
+ .npcm_fiu_data_info = npcm7xx_fiu_info,
.fiu_max = 3,
};
@@ -664,14 +664,13 @@ static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
};
static const struct of_device_id npcm_fiu_dt_ids[] = {
- { .compatible = "nuvoton,npcm750-fiu", .data = &npxm7xx_fiu_data },
+ { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
{ /* sentinel */ }
};
static int npcm_fiu_probe(struct platform_device *pdev)
{
const struct fiu_data *fiu_data_match;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct npcm_fiu_spi *fiu;
@@ -685,13 +684,12 @@ static int npcm_fiu_probe(struct platform_device *pdev)
fiu = spi_controller_get_devdata(ctrl);
- match = of_match_device(npcm_fiu_dt_ids, dev);
- if (!match || !match->data) {
+ fiu_data_match = of_device_get_match_data(dev);
+ if (!fiu_data_match) {
dev_err(dev, "No compatible OF match\n");
return -ENODEV;
}
- fiu_data_match = match->data;
id = of_alias_get_id(dev->of_node, "fiu");
if (id < 0 || id >= fiu_data_match->fiu_max) {
dev_err(dev, "Invalid platform device id: %d\n", id);
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index f86433b29260..7e5c09a7d489 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -591,18 +591,16 @@ static int pic32_spi_setup(struct spi_device *spi)
* unreliable/erroneous SPI transactions.
* To avoid that we will always handle /CS by toggling GPIO.
*/
- if (!gpio_is_valid(spi->cs_gpio))
+ if (!spi->cs_gpiod)
return -EINVAL;
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
-
return 0;
}
static void pic32_spi_cleanup(struct spi_device *spi)
{
- /* de-activate cs-gpio */
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ /* de-activate cs-gpio, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
}
static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
@@ -784,6 +782,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
master->unprepare_message = pic32_spi_unprepare_message;
master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
+ master->use_gpio_descriptors = true;
/* optional DMA support */
ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 2e134eb4bd2c..861b21c63504 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -15,32 +15,20 @@
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>
-enum {
- PORT_QUARK_X1000,
- PORT_BYT,
- PORT_MRFLD,
- PORT_BSW0,
- PORT_BSW1,
- PORT_BSW2,
- PORT_CE4100,
- PORT_LPT0,
- PORT_LPT1,
-};
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f0e
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x1194
+#define PCI_DEVICE_ID_INTEL_BSW0 0x228e
+#define PCI_DEVICE_ID_INTEL_BSW1 0x2290
+#define PCI_DEVICE_ID_INTEL_BSW2 0x22ac
+#define PCI_DEVICE_ID_INTEL_CE4100 0x2e6a
+#define PCI_DEVICE_ID_INTEL_LPT0_0 0x9c65
+#define PCI_DEVICE_ID_INTEL_LPT0_1 0x9c66
+#define PCI_DEVICE_ID_INTEL_LPT1_0 0x9ce5
+#define PCI_DEVICE_ID_INTEL_LPT1_1 0x9ce6
struct pxa_spi_info {
- enum pxa_ssp_type type;
- int port_id;
- int num_chipselect;
- unsigned long max_clk_rate;
-
- /* DMA channel request parameters */
- bool (*dma_filter)(struct dma_chan *chan, void *param);
- void *tx_param;
- void *rx_param;
-
- int dma_burst_size;
-
- int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
+ int (*setup)(struct pci_dev *pdev, struct pxa2xx_spi_controller *c);
};
static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
@@ -65,6 +53,24 @@ static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+static void pxa2xx_spi_pci_clk_unregister(void *clk)
+{
+ clk_unregister(clk);
+}
+
+static int pxa2xx_spi_pci_clk_register(struct pci_dev *dev, struct ssp_device *ssp,
+ unsigned long rate)
+{
+ char buf[40];
+
+ snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0, rate);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ return devm_add_action_or_reset(&dev->dev, pxa2xx_spi_pci_clk_unregister, ssp->clk);
+}
+
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *dws = param;
@@ -76,55 +82,131 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
return true;
}
-static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+static void lpss_dma_put_device(void *dma_dev)
{
+ pci_dev_put(dma_dev);
+}
+
+static int lpss_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+ struct dw_dma_slave *tx, *rx;
struct pci_dev *dma_dev;
+ int ret;
- c->num_chipselect = 1;
- c->max_clk_rate = 50000000;
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT:
+ ssp->type = LPSS_BYT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &byt_tx_param;
+ c->rx_param = &byt_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW0:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &bsw0_tx_param;
+ c->rx_param = &bsw0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW1:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &bsw1_tx_param;
+ c->rx_param = &bsw1_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW2:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 2;
+ c->tx_param = &bsw2_tx_param;
+ c->rx_param = &bsw2_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_0:
+ case PCI_DEVICE_ID_INTEL_LPT1_0:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &lpt0_tx_param;
+ c->rx_param = &lpt0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_1:
+ case PCI_DEVICE_ID_INTEL_LPT1_1:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &lpt1_tx_param;
+ c->rx_param = &lpt1_rx_param;
+ break;
+ default:
+ return -ENODEV;
+ }
- dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ c->num_chipselect = 1;
- if (c->tx_param) {
- struct dw_dma_slave *slave = c->tx_param;
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+ if (ret)
+ return ret;
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
- }
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
- if (c->rx_param) {
- struct dw_dma_slave *slave = c->rx_param;
+ tx = c->tx_param;
+ tx->dma_dev = &dma_dev->dev;
+ tx->m_master = 0;
+ tx->p_master = 1;
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
- }
+ rx = c->rx_param;
+ rx->dma_dev = &dma_dev->dev;
+ rx->m_master = 0;
+ rx->p_master = 1;
c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 1;
+ c->enable_dma = 1;
return 0;
}
-static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+static const struct pxa_spi_info lpss_info_config = {
+ .setup = lpss_spi_setup,
+};
+
+static int ce4100_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
- struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = PXA25x_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = dev->devfn;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 3686400);
+}
+
+static const struct pxa_spi_info ce4100_info_config = {
+ .setup = ce4100_spi_setup,
+};
+
+static int mrfld_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
struct dw_dma_slave *tx, *rx;
+ struct pci_dev *dma_dev;
+ int ret;
+
+ ssp->type = MRFLD_SSP;
switch (PCI_FUNC(dev->devfn)) {
case 0:
- c->port_id = 3;
+ ssp->port_id = 3;
c->num_chipselect = 1;
c->tx_param = &mrfld3_tx_param;
c->rx_param = &mrfld3_rx_param;
break;
case 1:
- c->port_id = 5;
+ ssp->port_id = 5;
c->num_chipselect = 4;
c->tx_param = &mrfld5_tx_param;
c->rx_param = &mrfld5_rx_param;
break;
case 2:
- c->port_id = 6;
+ ssp->port_id = 6;
c->num_chipselect = 1;
c->tx_param = &mrfld6_tx_param;
c->rx_param = &mrfld6_rx_param;
@@ -133,6 +215,15 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
return -ENODEV;
}
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 25000000);
+ if (ret)
+ return ret;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
+
tx = c->tx_param;
tx->dma_dev = &dma_dev->dev;
@@ -141,81 +232,38 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
c->dma_filter = lpss_dma_filter;
c->dma_burst_size = 8;
+ c->enable_dma = 1;
return 0;
}
-static struct pxa_spi_info spi_info_configs[] = {
- [PORT_CE4100] = {
- .type = PXA25x_SSP,
- .port_id = -1,
- .num_chipselect = -1,
- .max_clk_rate = 3686400,
- },
- [PORT_BYT] = {
- .type = LPSS_BYT_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &byt_tx_param,
- .rx_param = &byt_rx_param,
- },
- [PORT_BSW0] = {
- .type = LPSS_BSW_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &bsw0_tx_param,
- .rx_param = &bsw0_rx_param,
- },
- [PORT_BSW1] = {
- .type = LPSS_BSW_SSP,
- .port_id = 1,
- .setup = lpss_spi_setup,
- .tx_param = &bsw1_tx_param,
- .rx_param = &bsw1_rx_param,
- },
- [PORT_BSW2] = {
- .type = LPSS_BSW_SSP,
- .port_id = 2,
- .setup = lpss_spi_setup,
- .tx_param = &bsw2_tx_param,
- .rx_param = &bsw2_rx_param,
- },
- [PORT_MRFLD] = {
- .type = MRFLD_SSP,
- .max_clk_rate = 25000000,
- .setup = mrfld_spi_setup,
- },
- [PORT_QUARK_X1000] = {
- .type = QUARK_X1000_SSP,
- .port_id = -1,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
- },
- [PORT_LPT0] = {
- .type = LPSS_LPT_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &lpt0_tx_param,
- .rx_param = &lpt0_rx_param,
- },
- [PORT_LPT1] = {
- .type = LPSS_LPT_SSP,
- .port_id = 1,
- .setup = lpss_spi_setup,
- .tx_param = &lpt1_tx_param,
- .rx_param = &lpt1_rx_param,
- },
+static const struct pxa_spi_info mrfld_info_config = {
+ .setup = mrfld_spi_setup,
+};
+
+static int qrk_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = QUARK_X1000_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = 1;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+}
+
+static const struct pxa_spi_info qrk_info_config = {
+ .setup = qrk_spi_setup,
};
static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
+ const struct pxa_spi_info *info;
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
- struct pxa_spi_info *c;
- char buf[40];
ret = pcim_enable_device(dev);
if (ret)
@@ -225,27 +273,17 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
if (ret)
return ret;
- c = &spi_info_configs[ent->driver_data];
- if (c->setup) {
- ret = c->setup(dev, c);
- if (ret)
- return ret;
- }
-
memset(&spi_pdata, 0, sizeof(spi_pdata));
- spi_pdata.num_chipselect = (c->num_chipselect > 0) ? c->num_chipselect : dev->devfn;
- spi_pdata.dma_filter = c->dma_filter;
- spi_pdata.tx_param = c->tx_param;
- spi_pdata.rx_param = c->rx_param;
- spi_pdata.enable_dma = c->rx_param && c->tx_param;
- spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
- ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
- ssp->type = c->type;
+
+ info = (struct pxa_spi_info *)ent->driver_data;
+ ret = info->setup(dev, &spi_pdata);
+ if (ret)
+ return ret;
pci_set_master(dev);
@@ -254,14 +292,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return ret;
ssp->irq = pci_irq_vector(dev, 0);
- snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
- ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
- c->max_clk_rate);
- if (IS_ERR(ssp->clk))
- return PTR_ERR(ssp->clk);
-
memset(&pi, 0, sizeof(pi));
- pi.fwnode = dev->dev.fwnode;
+ pi.fwnode = dev_fwnode(&dev->dev);
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
@@ -269,10 +301,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
pi.size_data = sizeof(spi_pdata);
pdev = platform_device_register_full(&pi);
- if (IS_ERR(pdev)) {
- clk_unregister(ssp->clk);
+ if (IS_ERR(pdev))
return PTR_ERR(pdev);
- }
pci_set_drvdata(dev, pdev);
@@ -282,26 +312,22 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
- struct pxa2xx_spi_controller *spi_pdata;
-
- spi_pdata = dev_get_platdata(&pdev->dev);
platform_device_unregister(pdev);
- clk_unregister(spi_pdata->ssp.clk);
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
- { PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
- { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
- { PCI_VDEVICE(INTEL, 0x1194), PORT_MRFLD },
- { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
- { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
- { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
- { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
- { PCI_VDEVICE(INTEL, 0x9c65), PORT_LPT0 },
- { PCI_VDEVICE(INTEL, 0x9c66), PORT_LPT1 },
- { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
+ { PCI_DEVICE_DATA(INTEL, QUARK_X1000, &qrk_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BYT, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, MRFLD, &mrfld_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW2, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, CE4100, &ce4100_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_1, &lpss_info_config) },
{ }
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index e88f86274eeb..edb42d08857d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -1163,57 +1162,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
return 0;
}
-static void cleanup_cs(struct spi_device *spi)
-{
- if (!gpio_is_valid(spi->cs_gpio))
- return;
-
- gpio_free(spi->cs_gpio);
- spi->cs_gpio = -ENOENT;
-}
-
-static int setup_cs(struct spi_device *spi, struct chip_data *chip,
- struct pxa2xx_spi_chip *chip_info)
-{
- struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
-
- if (chip == NULL)
- return 0;
-
- if (chip_info == NULL)
- return 0;
-
- if (drv_data->ssp_type == CE4100_SSP)
- return 0;
-
- /*
- * NOTE: setup() can be called multiple times, possibly with
- * different chip_info, release previously requested GPIO.
- */
- cleanup_cs(spi);
-
- if (gpio_is_valid(chip_info->gpio_cs)) {
- int gpio = chip_info->gpio_cs;
- int err;
-
- err = gpio_request(gpio, "SPI_CS");
- if (err) {
- dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio);
- return err;
- }
-
- err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH));
- if (err) {
- gpio_free(gpio);
- return err;
- }
-
- spi->cs_gpio = gpio;
- }
-
- return 0;
-}
-
static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info;
@@ -1222,7 +1170,6 @@ static int setup(struct spi_device *spi)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
- int err;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1365,21 +1312,13 @@ static int setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
- if (drv_data->ssp_type == CE4100_SSP)
- return 0;
-
- err = setup_cs(spi, chip, chip_info);
- if (err)
- kfree(chip);
-
- return err;
+ return 0;
}
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
- cleanup_cs(spi);
kfree(chip);
}
@@ -1455,6 +1394,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
+ /* RPL-S */
+ { PCI_VDEVICE(INTEL, 0x7a2a), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a2b), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a79), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a7b), LPSS_CNL_SSP },
/* ADL-S */
{ PCI_VDEVICE(INTEL, 0x7aaa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP },
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index d39dec6d1c91..00d6084306b4 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -593,7 +593,6 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
struct spi_qup *controller = dev_id;
u32 opflags, qup_err, spi_err;
- unsigned long flags;
int error = 0;
qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
@@ -625,10 +624,10 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
error = -EIO;
}
- spin_lock_irqsave(&controller->lock, flags);
+ spin_lock(&controller->lock);
if (!controller->error)
controller->error = error;
- spin_unlock_irqrestore(&controller->lock, flags);
+ spin_unlock(&controller->lock);
if (spi_qup_is_dma_xfer(controller->mode)) {
writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index a46b38544027..bd87d3c92dd3 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -624,10 +624,8 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
/* Find the irq */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to get the irq\n");
+ if (ret < 0)
goto err_irq;
- }
ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
0, pdev->name, sfc);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 553b6b9d0222..cdc16eecaf6b 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -133,7 +133,8 @@
#define INT_TF_OVERFLOW (1 << 1)
#define INT_RF_UNDERFLOW (1 << 2)
#define INT_RF_OVERFLOW (1 << 3)
-#define INT_RF_FULL (1 << 4)
+#define INT_RF_FULL (1 << 4)
+#define INT_CS_INACTIVE (1 << 6)
/* Bit fields in ICR, 4bit */
#define ICR_MASK 0x0f
@@ -194,6 +195,8 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
bool slave_abort;
+	bool cs_inactive; /* SPI slave transfer stops when CS goes inactive */
+ struct spi_transfer *xfer; /* Store xfer temporarily */
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
@@ -275,8 +278,9 @@ static void rockchip_spi_handle_err(struct spi_controller *ctlr,
*/
spi_enable_chip(rs, false);
- /* make sure all interrupts are masked */
+ /* make sure all interrupts are masked and status cleared */
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
if (atomic_read(&rs->state) & TXDMA)
dmaengine_terminate_async(ctlr->dma_tx);
@@ -343,6 +347,15 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
struct spi_controller *ctlr = dev_id;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+	/* When the CS-inactive interrupt fires, abort the SPI slave transfer */
+ if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
+ ctlr->slave_abort(ctlr);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ return IRQ_HANDLED;
+ }
+
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
@@ -350,6 +363,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
if (!rs->rx_left) {
spi_enable_chip(rs, false);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
spi_finalize_current_transfer(ctlr);
}
@@ -357,14 +371,18 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
}
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
- struct spi_transfer *xfer)
+ struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
{
rs->tx = xfer->tx_buf;
rs->rx = xfer->rx_buf;
rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
rs->rx_left = xfer->len / rs->n_bytes;
- writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
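+	/* In slave mode, also unmask the CS-inactive interrupt so a released CS aborts the transfer */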
+ if (rs->cs_inactive)
+ writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+ else
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
spi_enable_chip(rs, true);
if (rs->tx_left)
@@ -383,6 +401,9 @@ static void rockchip_spi_dma_rxcb(void *data)
if (state & TXDMA && !rs->slave_abort)
return;
+ if (rs->cs_inactive)
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
}
@@ -423,14 +444,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
atomic_set(&rs->state, 0);
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+
rxdesc = NULL;
if (xfer->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = rs->dma_addr_rx,
.src_addr_width = rs->n_bytes,
- .src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
- rs->n_bytes),
+ .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
};
dmaengine_slave_config(ctlr->dma_rx, &rxconf);
@@ -474,10 +497,13 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
atomic_or(RXDMA, &rs->state);
- dmaengine_submit(rxdesc);
+ ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
dma_async_issue_pending(ctlr->dma_rx);
}
+ if (rs->cs_inactive)
+ writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, true);
if (txdesc) {
@@ -584,7 +610,48 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ u32 rx_fifo_left;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+	/* Get the current DMA RX position */
+ if (atomic_read(&rs->state) & RXDMA) {
+ dmaengine_pause(ctlr->dma_rx);
+ status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
+ if (status == DMA_ERROR) {
+ rs->rx = rs->xfer->rx_buf;
+ rs->xfer->len = 0;
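+			/* Drain and discard whatever is left in the RX FIFO */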
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--)
+ readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ goto out;
+ } else {
+ rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
+ }
+ }
+
+	/* Read the valid data left in the RX FIFO and set rs->xfer->len to the real RX size */
+ if (rs->rx) {
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (rs->n_bytes == 1)
+ *(u8 *)rs->rx = (u8)rxw;
+ else
+ *(u16 *)rs->rx = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+ rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
+ }
+out:
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ atomic_set(&rs->state, 0);
+ spi_enable_chip(rs, false);
rs->slave_abort = true;
spi_finalize_current_transfer(ctlr);
@@ -620,7 +687,7 @@ static int rockchip_spi_transfer_one(
}
rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
-
+ rs->xfer = xfer;
use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
@@ -630,7 +697,7 @@ static int rockchip_spi_transfer_one(
if (use_dma)
return rockchip_spi_prepare_dma(rs, ctlr, xfer);
- return rockchip_spi_prepare_irq(rs, xfer);
+ return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}
static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
@@ -647,6 +714,29 @@ static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
return xfer->len / bytes_per_word >= rs->fifo_len;
}
+static int rockchip_spi_setup(struct spi_device *spi)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
+ u32 cr0;
+
+ pm_runtime_get_sync(rs->dev);
+
+ cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
+ cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
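+	/* The SOI bit reflects an active-high chip select for native CS 0 and 1 */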
+ if (spi->mode & SPI_CS_HIGH && spi->chip_select <= 1)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+ else if (spi->chip_select <= 1)
+ cr0 &= ~(BIT(spi->chip_select) << CR0_SOI_OFFSET);
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ pm_runtime_put(rs->dev);
+
+ return 0;
+}
+
static int rockchip_spi_probe(struct platform_device *pdev)
{
int ret;
@@ -654,7 +744,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
- u32 rsd_nsecs;
+ u32 rsd_nsecs, num_cs;
bool slave_mode;
slave_mode = of_property_read_bool(np, "spi-slave");
@@ -764,8 +854,9 @@ static int rockchip_spi_probe(struct platform_device *pdev)
* rk spi0 has two native cs, spi1..5 one cs only
* if num-cs is missing in the dts, default to 1
*/
- if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
- ctlr->num_chipselect = 1;
+ if (of_property_read_u32(np, "num-cs", &num_cs))
+ num_cs = 1;
+ ctlr->num_chipselect = num_cs;
ctlr->use_gpio_descriptors = true;
}
ctlr->dev.of_node = pdev->dev.of_node;
@@ -773,6 +864,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+ ctlr->setup = rockchip_spi_setup;
ctlr->set_cs = rockchip_spi_set_cs;
ctlr->transfer_one = rockchip_spi_transfer_one;
ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
@@ -808,8 +900,13 @@ static int rockchip_spi_probe(struct platform_device *pdev)
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
ctlr->mode_bits |= SPI_CS_HIGH;
+ if (ctlr->can_dma && slave_mode)
+ rs->cs_inactive = true;
+ else
+ rs->cs_inactive = false;
break;
default:
+ rs->cs_inactive = false;
break;
}
@@ -868,14 +965,14 @@ static int rockchip_spi_suspend(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = spi_controller_suspend(ctlr);
if (ret < 0)
return ret;
- ret = pm_runtime_force_suspend(dev);
- if (ret < 0)
- return ret;
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
pinctrl_pm_select_sleep_state(dev);
@@ -890,10 +987,14 @@ static int rockchip_spi_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = pm_runtime_force_resume(dev);
+ ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0)
+ clk_disable_unprepare(rs->apb_pclk);
+
ret = spi_controller_resume(ctlr);
if (ret < 0) {
clk_disable_unprepare(rs->spiclk);
@@ -935,7 +1036,7 @@ static int rockchip_spi_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
rockchip_spi_runtime_resume, NULL)
};
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index d6f51695ca5b..660aa866af06 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -62,9 +61,6 @@ struct s3c24xx_spi {
unsigned char fiq_inuse;
unsigned char fiq_claimed;
- void (*set_cs)(struct s3c2410_spi_info *spi,
- int cs, int pol);
-
/* data buffers */
const unsigned char *tx;
unsigned char *rx;
@@ -84,29 +80,21 @@ static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
return spi_master_get_devdata(sdev->master);
}
-static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol)
-{
- gpio_set_value(spi->pin_cs, pol);
-}
-
static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
{
struct s3c24xx_spi_devstate *cs = spi->controller_state;
struct s3c24xx_spi *hw = to_hw(spi);
- unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
/* change the chipselect state and the state of the spi engine clock */
switch (value) {
case BITBANG_CS_INACTIVE:
- hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
writeb(cs->spcon, hw->regs + S3C2410_SPCON);
break;
case BITBANG_CS_ACTIVE:
writeb(cs->spcon | S3C2410_SPCON_ENSCK,
hw->regs + S3C2410_SPCON);
- hw->set_cs(hw->pdata, spi->chip_select, cspol);
break;
}
}
@@ -452,14 +440,6 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
writeb(0xff, hw->regs + S3C2410_SPPRE);
writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
-
- if (hw->pdata) {
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_direction_output(hw->pdata->pin_cs, 1);
-
- if (hw->pdata->gpio_setup)
- hw->pdata->gpio_setup(hw->pdata, 1);
- }
}
static int s3c24xx_spi_probe(struct platform_device *pdev)
@@ -502,6 +482,9 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ /* we need to call the local chipselect callback */
+ master->flags = SPI_MASTER_GPIO_SS;
+ master->use_gpio_descriptors = true;
/* setup the state for the bitbang driver */
@@ -541,27 +524,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
goto err_no_pdata;
}
- /* setup any gpio we can */
-
- if (!pdata->set_cs) {
- if (pdata->pin_cs < 0) {
- dev_err(&pdev->dev, "No chipselect pin\n");
- err = -EINVAL;
- goto err_register;
- }
-
- err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
- dev_name(&pdev->dev));
- if (err) {
- dev_err(&pdev->dev, "Failed to get gpio for cs\n");
- goto err_register;
- }
-
- hw->set_cs = s3c24xx_spi_gpiocs;
- gpio_direction_output(pdata->pin_cs, 1);
- } else
- hw->set_cs = pdata->set_cs;
-
s3c24xx_spi_initialsetup(hw);
/* register our spi controller */
@@ -604,9 +566,6 @@ static int s3c24xx_spi_suspend(struct device *dev)
if (ret)
return ret;
- if (hw->pdata && hw->pdata->gpio_setup)
- hw->pdata->gpio_setup(hw->pdata, 0);
-
clk_disable(hw->clk);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 8755cd85e83c..c26440e9058d 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -13,10 +13,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/spi-s3c64xx.h>
@@ -656,7 +654,11 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
/* Configure feedback delay */
- writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+ if (!cs)
+ /* No delay if not defined */
+ writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
+ else
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
return 0;
}
@@ -796,16 +798,14 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
return ERR_PTR(-EINVAL);
}
- data_np = of_get_child_by_name(slave_np, "controller-data");
- if (!data_np) {
- dev_err(&spi->dev, "child node 'controller-data' not found\n");
- return ERR_PTR(-EINVAL);
- }
-
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs) {
- of_node_put(data_np);
+ if (!cs)
return ERR_PTR(-ENOMEM);
+
+ data_np = of_get_child_by_name(slave_np, "controller-data");
+ if (!data_np) {
+ dev_info(&spi->dev, "feedback delay set to default (0)\n");
+ return cs;
}
of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
@@ -830,34 +830,16 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
if (spi->dev.of_node) {
cs = s3c64xx_get_slave_ctrldata(spi);
spi->controller_data = cs;
- } else if (cs) {
- /* On non-DT platforms the SPI core will set spi->cs_gpio
- * to -ENOENT. The GPIO pin used to drive the chip select
- * is defined by using platform data so spi->cs_gpio value
- * has to be override to have the proper GPIO pin number.
- */
- spi->cs_gpio = cs->line;
}
- if (IS_ERR_OR_NULL(cs)) {
+ /* NULL is fine, we just avoid using the FB delay (=0) */
+ if (IS_ERR(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
- if (!spi_get_ctldata(spi)) {
- if (gpio_is_valid(spi->cs_gpio)) {
- err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (err) {
- dev_err(&spi->dev,
- "Failed to get /CS gpio [%d]: %d\n",
- spi->cs_gpio, err);
- goto err_gpio_req;
- }
- }
-
+ if (!spi_get_ctldata(spi))
spi_set_ctldata(spi, cs);
- }
pm_runtime_get_sync(&sdd->pdev->dev);
@@ -909,11 +891,9 @@ setup_exit:
/* setup() returns with device de-selected */
s3c64xx_spi_set_cs(spi, false);
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
spi_set_ctldata(spi, NULL);
-err_gpio_req:
+ /* This was dynamically allocated on the DT path */
if (spi->dev.of_node)
kfree(cs);
@@ -924,19 +904,9 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_free(spi->cs_gpio);
- if (spi->dev.of_node)
- kfree(cs);
- else {
- /* On non-DT platforms, the SPI core sets
- * spi->cs_gpio to -ENOENT and .setup()
- * overrides it with the GPIO pin value
- * passed using platform data.
- */
- spi->cs_gpio = -ENOENT;
- }
- }
+ /* This was dynamically allocated on the DT path */
+ if (spi->dev.of_node)
+ kfree(cs);
spi_set_ctldata(spi, NULL);
}
@@ -1131,6 +1101,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
master->num_chipselect = sci->num_cs;
+ master->use_gpio_descriptors = true;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
@@ -1442,6 +1413,16 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
+static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = false,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1472,6 +1453,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos5433-spi",
.data = (void *)&exynos5433_spi_port_config,
},
+ { .compatible = "tesla,fsd-spi",
+ .data = (void *)&fsd_spi_port_config,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
index 169f3d595f60..d37cfe995a63 100644
--- a/drivers/spi/spi-slave-system-control.c
+++ b/drivers/spi/spi-slave-system-control.c
@@ -132,13 +132,12 @@ static int spi_slave_system_control_probe(struct spi_device *spi)
return 0;
}
-static int spi_slave_system_control_remove(struct spi_device *spi)
+static void spi_slave_system_control_remove(struct spi_device *spi)
{
struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
- return 0;
}
static struct spi_driver spi_slave_system_control_driver = {
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
index f2e07a392d68..f56c1afb8534 100644
--- a/drivers/spi/spi-slave-time.c
+++ b/drivers/spi/spi-slave-time.c
@@ -106,13 +106,12 @@ static int spi_slave_time_probe(struct spi_device *spi)
return 0;
}
-static int spi_slave_time_remove(struct spi_device *spi)
+static void spi_slave_time_remove(struct spi_device *spi)
{
struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
- return 0;
}
static struct spi_driver spi_slave_time_driver = {
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 6c44dda9ee8c..843be803696b 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -17,7 +17,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -171,11 +170,6 @@ static int spi_st_transfer_one(struct spi_master *master,
return t->len;
}
-static void spi_st_cleanup(struct spi_device *spi)
-{
- gpio_free(spi->cs_gpio);
-}
-
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
static int spi_st_setup(struct spi_device *spi)
@@ -183,29 +177,17 @@ static int spi_st_setup(struct spi_device *spi)
struct spi_st *spi_st = spi_master_get_devdata(spi->master);
u32 spi_st_clk, sscbrg, var;
u32 hz = spi->max_speed_hz;
- int cs = spi->cs_gpio;
- int ret;
if (!hz) {
dev_err(&spi->dev, "max_speed_hz unspecified\n");
return -EINVAL;
}
- if (!gpio_is_valid(cs)) {
- dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+ if (!spi->cs_gpiod) {
+ dev_err(&spi->dev, "no valid gpio assigned\n");
return -EINVAL;
}
- ret = gpio_request(cs, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "could not request gpio:%d\n", cs);
- return ret;
- }
-
- ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
- if (ret)
- goto out_free_gpio;
-
spi_st_clk = clk_get_rate(spi_st->clk);
/* Set SSC_BRF */
@@ -213,8 +195,7 @@ static int spi_st_setup(struct spi_device *spi)
if (sscbrg < 0x07 || sscbrg > BIT(16)) {
dev_err(&spi->dev,
"baudrate %d outside valid range %d\n", sscbrg, hz);
- ret = -EINVAL;
- goto out_free_gpio;
+ return -EINVAL;
}
spi_st->baud = spi_st_clk / (2 * sscbrg);
@@ -263,10 +244,6 @@ static int spi_st_setup(struct spi_device *spi)
readl_relaxed(spi_st->base + SSC_RBUF);
return 0;
-
-out_free_gpio:
- gpio_free(cs);
- return ret;
}
/* Interrupt fired when TX shift register becomes empty */
@@ -309,11 +286,11 @@ static int spi_st_probe(struct platform_device *pdev)
master->dev.of_node = np;
master->mode_bits = MODEBITS;
master->setup = spi_st_setup;
- master->cleanup = spi_st_cleanup;
master->transfer_one = spi_st_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
+ master->use_gpio_descriptors = true;
spi_st = spi_master_get_devdata(master);
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 514337c86d2c..ffdc55f87e82 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -688,7 +688,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- ctrl = spi_alloc_master(dev, sizeof(*qspi));
+ ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
@@ -697,58 +697,46 @@ static int stm32_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->io_base)) {
- ret = PTR_ERR(qspi->io_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->mm_base)) {
- ret = PTR_ERR(qspi->mm_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
qspi->mm_size = resource_size(res);
- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_master_put;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_master_put;
+ return ret;
}
init_completion(&qspi->data_completion);
init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(qspi->clk)) {
- ret = PTR_ERR(qspi->clk);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
qspi->clk_rate = clk_get_rate(qspi->clk);
- if (!qspi->clk_rate) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (!qspi->clk_rate)
+ return -EINVAL;
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err_master_put;
+ return ret;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -784,7 +772,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
- ret = devm_spi_register_master(dev, ctrl);
+ ret = spi_register_master(ctrl);
if (ret)
goto err_pm_runtime_free;
@@ -806,8 +794,6 @@ err_dma_free:
stm32_qspi_dma_free(qspi);
err_clk_disable:
clk_disable_unprepare(qspi->clk);
-err_master_put:
- spi_master_put(qspi->ctrl);
return ret;
}
@@ -817,6 +803,7 @@ static int stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
+ spi_unregister_master(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 9bd3fd1652f7..a6adc20f6862 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -221,7 +221,6 @@ struct stm32_spi;
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
- * @can_dma: routine to determine if the transfer is eligible for DMA use
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
@@ -232,7 +231,7 @@ struct stm32_spi;
* @baud_rate_div_min: minimum baud rate divisor
* @baud_rate_div_max: maximum baud rate divisor
* @has_fifo: boolean to know if fifo is used for driver
- * @has_startbit: boolean to know if start bit is used to start transfer
+ * @flags: compatible specific SPI controller flags used at registration time
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
@@ -253,6 +252,7 @@ struct stm32_spi_cfg {
unsigned int baud_rate_div_min;
unsigned int baud_rate_div_max;
bool has_fifo;
+ u16 flags;
};
/**
@@ -763,7 +763,7 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX)) {
/* OVR flag shouldn't be handled for TX only mode */
- sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE;
+ sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
mask |= STM32F4_SPI_SR_TXE;
}
@@ -1722,6 +1722,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
.has_fifo = false,
+ .flags = SPI_MASTER_MUST_TX,
};
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
@@ -1854,7 +1855,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- master->flags = SPI_MASTER_MUST_TX;
+ master->flags = spi->cfg->flags;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 1fdfc6e6691d..6000d0761206 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -280,7 +280,7 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
* SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
* Or we can use CDR2, which is calculated with the formula:
* SPI_CLK = MOD_CLK / (2 * (cdr + 1))
- * Wether we use the former or the latter is set through the
+ * Whether we use the former or the latter is set through the
* DRS bit.
*
* First try CDR2, and if we can't reach the expected
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
new file mode 100644
index 000000000000..f989f7b99296
--- /dev/null
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Sunplus Inc.
+// Author: Li-hao Kuo <lhjeff911@gmail.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SP7021_DATA_RDY_REG 0x0044
+#define SP7021_SLAVE_DMA_CTRL_REG 0x0048
+#define SP7021_SLAVE_DMA_LENGTH_REG 0x004c
+#define SP7021_SLAVE_DMA_ADDR_REG 0x004c
+
+#define SP7021_SLAVE_DATA_RDY BIT(0)
+#define SP7021_SLAVE_SW_RST BIT(1)
+#define SP7021_SLA_DMA_W_INT BIT(8)
+#define SP7021_SLAVE_CLR_INT BIT(8)
+#define SP7021_SLAVE_DMA_EN BIT(0)
+#define SP7021_SLAVE_DMA_RW BIT(6)
+#define SP7021_SLAVE_DMA_CMD GENMASK(3, 2)
+
+#define SP7021_FIFO_REG 0x0034
+#define SP7021_SPI_STATUS_REG 0x0038
+#define SP7021_SPI_CONFIG_REG 0x003c
+#define SP7021_INT_BUSY_REG 0x004c
+#define SP7021_DMA_CTRL_REG 0x0050
+
+#define SP7021_SPI_START_FD BIT(0)
+#define SP7021_FD_SW_RST BIT(1)
+#define SP7021_TX_EMP_FLAG BIT(2)
+#define SP7021_RX_EMP_FLAG BIT(4)
+#define SP7021_RX_FULL_FLAG BIT(5)
+#define SP7021_FINISH_FLAG BIT(6)
+
+#define SP7021_TX_CNT_MASK GENMASK(11, 8)
+#define SP7021_RX_CNT_MASK GENMASK(15, 12)
+#define SP7021_TX_LEN_MASK GENMASK(23, 16)
+#define SP7021_GET_LEN_MASK GENMASK(31, 24)
+#define SP7021_SET_TX_LEN GENMASK(23, 16)
+#define SP7021_SET_XFER_LEN GENMASK(31, 24)
+
+#define SP7021_CPOL_FD BIT(0)
+#define SP7021_CPHA_R BIT(1)
+#define SP7021_CPHA_W BIT(2)
+#define SP7021_LSB_SEL BIT(4)
+#define SP7021_CS_POR BIT(5)
+#define SP7021_FD_SEL BIT(6)
+
+#define SP7021_RX_UNIT GENMASK(8, 7)
+#define SP7021_TX_UNIT GENMASK(10, 9)
+#define SP7021_TX_EMP_FLAG_MASK BIT(11)
+#define SP7021_RX_FULL_FLAG_MASK BIT(14)
+#define SP7021_FINISH_FLAG_MASK BIT(15)
+#define SP7021_CLEAN_RW_BYTE GENMASK(10, 7)
+#define SP7021_CLEAN_FLAG_MASK GENMASK(15, 11)
+#define SP7021_CLK_MASK GENMASK(31, 16)
+
+#define SP7021_INT_BYPASS BIT(3)
+#define SP7021_CLR_MASTER_INT BIT(6)
+
+#define SP7021_SPI_DATA_SIZE (255)
+#define SP7021_FIFO_DATA_LEN (16)
+
+enum {
+ SP7021_MASTER_MODE = 0,
+ SP7021_SLAVE_MODE = 1,
+};
+
+struct sp7021_spi_ctlr {
+ struct device *dev;
+ struct spi_controller *ctlr;
+ void __iomem *m_base;
+ void __iomem *s_base;
+ u32 xfer_conf;
+ int mode;
+ int m_irq;
+ int s_irq;
+ struct clk *spi_clk;
+ struct reset_control *rstc;
+ // irq spin lock
+ spinlock_t lock;
+ // data xfer lock
+ struct mutex buf_lock;
+ struct completion isr_done;
+ struct completion slave_isr;
+ unsigned int rx_cur_len;
+ unsigned int tx_cur_len;
+ unsigned int data_unit;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+};
+
+static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int data_status;
+
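+ // acknowledge the slave interrupt and signal completion to the waiting transfer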
+ data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ data_status |= SP7021_SLAVE_CLR_INT;
+ writel(data_status, pspim->s_base + SP7021_DATA_RDY_REG);
+ complete(&pspim->slave_isr);
+ return IRQ_HANDLED;
+}
+
+static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ complete(&pspim->slave_isr);
+ complete(&pspim->isr_done);
+ return 0;
+}
+
+static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->slave_isr);
+ value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->tx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ value = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ value |= SP7021_SLAVE_DATA_RDY;
+ writel(value, pspim->s_base + SP7021_DATA_RDY_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->isr_done);
+ value = SP7021_SLAVE_DMA_EN | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->rx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ writel(SP7021_SLAVE_SW_RST, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ return 0;
+}
+
+static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ pspim->rx_buf[pspim->rx_cur_len] =
+ readl(pspim->m_base + SP7021_FIFO_REG);
+ pspim->rx_cur_len++;
+ }
+}
+
+static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ writel(pspim->tx_buf[pspim->tx_cur_len],
+ pspim->m_base + SP7021_FIFO_REG);
+ pspim->tx_cur_len++;
+ }
+}
+
+static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int tx_cnt, total_len;
+ unsigned int tx_len, rx_cnt;
+ unsigned int fd_status;
+ bool isrdone = false;
+ u32 value;
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_cnt = FIELD_GET(SP7021_TX_CNT_MASK, fd_status);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
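+ // no FIFO activity and no programmed transfer length: treat the interrupt as spurious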
+ if ((fd_status & SP7021_TX_EMP_FLAG) && (fd_status & SP7021_RX_EMP_FLAG) && total_len == 0)
+ return IRQ_NONE;
+
+ if (tx_len == 0 && total_len == 0)
+ return IRQ_NONE;
+
+ spin_lock_irq(&pspim->lock);
+
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+
+ tx_cnt = min(tx_len - pspim->tx_cur_len, pspim->data_unit - tx_cnt);
+ dev_dbg(pspim->dev, "fd_st=0x%x rx_c:%d tx_c:%d tx_l:%d",
+ fd_status, rx_cnt, tx_cnt, tx_len);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ if (tx_cnt > 0)
+ sp7021_spi_master_wb(pspim, tx_cnt);
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
+ if (fd_status & SP7021_FINISH_FLAG || tx_len == pspim->tx_cur_len) {
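+ // drain any remaining RX data until the full transfer length has been read back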
+ while (total_len != pspim->rx_cur_len) {
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+ else
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ }
+ value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
+ value |= SP7021_CLR_MASTER_INT;
+ writel(value, pspim->m_base + SP7021_INT_BUSY_REG);
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ isrdone = true;
+ }
+
+ if (isrdone)
+ complete(&pspim->isr_done);
+ spin_unlock_irq(&pspim->lock);
+ return IRQ_HANDLED;
+}
+
+static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ pspim->tx_cur_len = 0;
+ pspim->rx_cur_len = 0;
+ pspim->data_unit = SP7021_FIFO_DATA_LEN;
+}
+
+// preliminary setup of CS, CPOL, CPHA and LSB
+static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct spi_device *s = msg->spi;
+ u32 value, rs = 0;
+
+ value = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ value |= SP7021_FD_SW_RST;
+ writel(value, pspim->m_base + SP7021_SPI_STATUS_REG);
+ rs |= SP7021_FD_SEL;
+ if (s->mode & SPI_CPOL)
+ rs |= SP7021_CPOL_FD;
+
+ if (s->mode & SPI_LSB_FIRST)
+ rs |= SP7021_LSB_SEL;
+
+ if (s->mode & SPI_CS_HIGH)
+ rs |= SP7021_CS_POR;
+
+ if (s->mode & SPI_CPHA)
+ rs |= SP7021_CPHA_R;
+ else
+ rs |= SP7021_CPHA_W;
+
+ rs |= FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ pspim->xfer_conf = rs;
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ return 0;
+}
+
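+// pick clk_sel so that clk_rate / (2 * (clk_sel + 1)) approximates the requested speed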
+static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ u32 clk_rate, clk_sel, div;
+
+ clk_rate = clk_get_rate(pspim->spi_clk);
+ div = max(2U, clk_rate / xfer->speed_hz);
+
+ clk_sel = (div / 2) - 1;
+ pspim->xfer_conf &= ~SP7021_CLK_MASK;
+ pspim->xfer_conf |= FIELD_PREP(SP7021_CLK_MASK, clk_sel);
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+}
+
+static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ unsigned long timeout = msecs_to_jiffies(1000);
+ unsigned int xfer_cnt, xfer_len, last_len;
+ unsigned int i, len_temp;
+ u32 reg_temp;
+
+ xfer_cnt = xfer->len / SP7021_SPI_DATA_SIZE;
+ last_len = xfer->len % SP7021_SPI_DATA_SIZE;
+
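+ // process the transfer in chunks of at most SP7021_SPI_DATA_SIZE bytes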
+ for (i = 0; i <= xfer_cnt; i++) {
+ mutex_lock(&pspim->buf_lock);
+ sp7021_prep_transfer(ctlr, spi);
+ sp7021_spi_setup_clk(ctlr, xfer);
+ reinit_completion(&pspim->isr_done);
+
+ if (i == xfer_cnt)
+ xfer_len = last_len;
+ else
+ xfer_len = SP7021_SPI_DATA_SIZE;
+
+ pspim->tx_buf = xfer->tx_buf + i * SP7021_SPI_DATA_SIZE;
+ pspim->rx_buf = xfer->rx_buf + i * SP7021_SPI_DATA_SIZE;
+
+ if (pspim->tx_cur_len < xfer_len) {
+ len_temp = min(pspim->data_unit, xfer_len);
+ sp7021_spi_master_wb(pspim, len_temp);
+ }
+ reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
+ reg_temp &= ~SP7021_CLEAN_RW_BYTE;
+ reg_temp &= ~SP7021_CLEAN_FLAG_MASK;
+ reg_temp |= SP7021_FD_SEL | SP7021_FINISH_FLAG_MASK |
+ SP7021_TX_EMP_FLAG_MASK | SP7021_RX_FULL_FLAG_MASK |
+ FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ writel(reg_temp, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ reg_temp = FIELD_PREP(SP7021_SET_TX_LEN, xfer_len) |
+ FIELD_PREP(SP7021_SET_XFER_LEN, xfer_len) |
+ SP7021_SPI_START_FD;
+ writel(reg_temp, pspim->m_base + SP7021_SPI_STATUS_REG);
+
+ if (!wait_for_completion_interruptible_timeout(&pspim->isr_done, timeout)) {
+ dev_err(&spi->dev, "wait_for_completion err\n");
+ mutex_unlock(&pspim->buf_lock);
+ return -ETIMEDOUT;
+ }
+
+ reg_temp = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ if (reg_temp & SP7021_FINISH_FLAG) {
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ writel(readl(pspim->m_base + SP7021_SPI_CONFIG_REG) &
+ SP7021_CLEAN_FLAG_MASK, pspim->m_base + SP7021_SPI_CONFIG_REG);
+ }
+
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ mutex_unlock(&pspim->buf_lock);
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct device *dev = pspim->dev;
+ int ret;
+
+ if (xfer->tx_buf && !xfer->rx_buf) {
+ xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_tx(spi, xfer);
+ dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
+ } else if (xfer->rx_buf && !xfer->tx_buf) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_rx(spi, xfer);
+ dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
+ } else {
+ dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
+ return -EINVAL;
+ }
+
+ spi_finalize_current_transfer(ctlr);
+ return ret;
+}
+
+static void sp7021_spi_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void sp7021_spi_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int sp7021_spi_controller_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp7021_spi_ctlr *pspim;
+ struct spi_controller *ctlr;
+ int mode, ret;
+
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
+
+ if (device_property_read_bool(dev, "spi-slave"))
+ mode = SP7021_SLAVE_MODE;
+ else
+ mode = SP7021_MASTER_MODE;
+
+ if (mode == SP7021_SLAVE_MODE)
+ ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
+ else
+ ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
+ if (!ctlr)
+ return -ENOMEM;
+ device_set_node(&ctlr->dev, dev_fwnode(dev));
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ ctlr->auto_runtime_pm = true;
+ ctlr->prepare_message = sp7021_spi_controller_prepare_message;
+ if (mode == SP7021_SLAVE_MODE) {
+ ctlr->transfer_one = sp7021_spi_slave_transfer_one;
+ ctlr->slave_abort = sp7021_spi_slave_abort;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ } else {
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->min_speed_hz = 40000;
+ ctlr->max_speed_hz = 25000000;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ ctlr->transfer_one = sp7021_spi_master_transfer_one;
+ }
+ platform_set_drvdata(pdev, ctlr);
+ pspim = spi_controller_get_devdata(ctlr);
+ pspim->mode = mode;
+ pspim->ctlr = ctlr;
+ pspim->dev = dev;
+ spin_lock_init(&pspim->lock);
+ mutex_init(&pspim->buf_lock);
+ init_completion(&pspim->isr_done);
+ init_completion(&pspim->slave_isr);
+
+ pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
+ if (IS_ERR(pspim->m_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->m_base), "m_base get fail\n");
+
+ pspim->s_base = devm_platform_ioremap_resource_byname(pdev, "slave");
+ if (IS_ERR(pspim->s_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->s_base), "s_base get fail\n");
+
+ pspim->m_irq = platform_get_irq_byname(pdev, "master_risc");
+ if (pspim->m_irq < 0)
+ return pspim->m_irq;
+
+ pspim->s_irq = platform_get_irq_byname(pdev, "slave_risc");
+ if (pspim->s_irq < 0)
+ return pspim->s_irq;
+
+ pspim->spi_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pspim->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(pspim->spi_clk), "clk get fail\n");
+
+ pspim->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(pspim->rstc))
+ return dev_err_probe(dev, PTR_ERR(pspim->rstc), "rst get fail\n");
+
+ ret = clk_prepare_enable(pspim->spi_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clk\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_disable_unprepare, pspim->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(pspim->rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert reset\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_reset_control_assert, pspim->rstc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "spi_register_controller fail\n");
+ }
+ return 0;
+}
+
+static int sp7021_spi_controller_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+
+ spi_unregister_controller(ctlr);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ reset_control_deassert(pspim->rstc);
+ return clk_prepare_enable(pspim->spi_clk);
+}
+
+#ifdef CONFIG_PM
+static int sp7021_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int sp7021_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_deassert(pspim->rstc);
+}
+#endif
+
+static const struct dev_pm_ops sp7021_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sp7021_spi_runtime_suspend,
+ sp7021_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sp7021_spi_controller_suspend,
+ sp7021_spi_controller_resume)
+};
+
+static const struct of_device_id sp7021_spi_controller_ids[] = {
+ { .compatible = "sunplus,sp7021-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sp7021_spi_controller_ids);
+
+static struct platform_driver sp7021_spi_controller_driver = {
+ .probe = sp7021_spi_controller_probe,
+ .remove = sp7021_spi_controller_remove,
+ .driver = {
+ .name = "sunplus,sp7021-spi-controller",
+ .of_match_table = sp7021_spi_controller_ids,
+ .pm = &sp7021_spi_pm_ops,
+ },
+};
+module_platform_driver(sp7021_spi_controller_driver);
+
+MODULE_AUTHOR("Li-hao Kuo <lhjeff911@gmail.com>");
+MODULE_DESCRIPTION("Sunplus SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index e9de1d958bbd..8f345247a8c3 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1352,6 +1352,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_free_master;
+ }
tspi->irq = spi_irq;
tspi->clk = devm_clk_get(&pdev->dev, "spi");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 2a03739a0c60..80c3787deea9 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1006,14 +1006,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
struct resource *r;
int ret, spi_irq;
const struct tegra_slink_chip_data *cdata = NULL;
- const struct of_device_id *match;
- match = of_match_device(tegra_slink_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- cdata = match->data;
+ cdata = of_device_get_match_data(&pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index ce1bdb4767ea..66f647f32876 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -21,6 +21,8 @@
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -119,11 +121,40 @@
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX 0xff
+#define QSPI_CMB_SEQ_CMD 0x19c
+#define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
+#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_COMMAND_SDR_DDR BIT(12)
+#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_GLOBAL_CONFIG 0x1a4
+#define QSPI_CMB_SEQ_EN BIT(0)
+
+#define QSPI_CMB_SEQ_ADDR 0x1a8
+#define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)
+
+#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
+#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_ADDRESS_SDR_DDR BIT(12)
+#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
+
#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
+#define CMD_TRANSFER 0
+#define ADDR_TRANSFER 1
+#define DATA_TRANSFER 2
+
+struct tegra_qspi_soc_data {
+ bool has_dma;
+ bool cmb_xfer_capable;
+};
struct tegra_qspi_client_data {
int tx_clk_tap_delay;
@@ -137,7 +168,6 @@ struct tegra_qspi {
spinlock_t lock;
struct clk *clk;
- struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned int irq;
@@ -185,6 +215,7 @@ struct tegra_qspi {
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_qspi_soc_data *soc_data;
};
static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
@@ -767,7 +798,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
u32 tx_tap = 0, rx_tap = 0;
int req_mode;
- if (speed != tqspi->cur_speed) {
+ if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
clk_set_rate(tqspi->clk, speed);
tqspi->cur_speed = speed;
}
@@ -875,16 +906,16 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
- struct device_node *slave_np = spi->dev.of_node;
cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
- of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
- &cdata->tx_clk_tap_delay);
- of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
- &cdata->rx_clk_tap_delay);
+ device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+
return cdata;
}
@@ -906,7 +937,6 @@ static int tegra_qspi_setup(struct spi_device *spi)
cdata = tegra_qspi_parse_cdata_dt(spi);
spi->controller_data = cdata;
}
-
spin_lock_irqsave(&tqspi->lock, flags);
/* keep default cs state to inactive */
@@ -948,9 +978,8 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
tegra_qspi_dump_regs(tqspi);
tegra_qspi_flush_fifos(tqspi, true);
- reset_control_assert(tqspi->rst);
- udelay(2);
- reset_control_deassert(tqspi->rst);
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
}
static void tegra_qspi_transfer_end(struct spi_device *spi)
@@ -966,19 +995,179 @@ static void tegra_qspi_transfer_end(struct spi_device *spi)
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}
-static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi_message *msg)
+static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 cmd_config = 0;
+
+ /* Extract Command configuration and value */
+ if (is_ddr)
+ cmd_config |= QSPI_COMMAND_SDR_DDR;
+ else
+ cmd_config &= ~QSPI_COMMAND_SDR_DDR;
+
+ cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
+ cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
+
+ return cmd_config;
+}
+
+static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 addr_config = 0;
+
+ /* Extract Address configuration and value */
+ is_ddr = 0; /* only SDR mode supported */
+ bus_width = 0; /* X1 mode */
+
+ if (is_ddr)
+ addr_config |= QSPI_ADDRESS_SDR_DDR;
+ else
+ addr_config &= ~QSPI_ADDRESS_SDR_DDR;
+
+ addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
+ addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
+
+ return addr_config;
+}
+
+static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ u8 transfer_phase = 0;
+ u32 cmd1 = 0, dma_ctl = 0;
+ int ret = 0;
+ u32 address_value = 0;
+ u32 cmd_config = 0, addr_config = 0;
+ u8 cmd_value = 0, val = 0;
+
+ /* Enable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val |= QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
+ /* Process individual transfer list */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ switch (transfer_phase) {
+ case CMD_TRANSFER:
+ /* X1 SDR mode */
+ cmd_config = tegra_qspi_cmd_config(false, 0,
+ xfer->len);
+ cmd_value = *((const u8 *)(xfer->tx_buf));
+ break;
+ case ADDR_TRANSFER:
+ /* X1 SDR mode */
+ addr_config = tegra_qspi_addr_config(false, 0,
+ xfer->len);
+ address_value = *((const u32 *)(xfer->tx_buf));
+ break;
+ case DATA_TRANSFER:
+ /* Program Command, Address value in register */
+ tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
+ tegra_qspi_writel(tqspi, address_value,
+ QSPI_CMB_SEQ_ADDR);
+ /* Program Command and Address config in register */
+ tegra_qspi_writel(tqspi, cmd_config,
+ QSPI_CMB_SEQ_CMD_CFG);
+ tegra_qspi_writel(tqspi, addr_config,
+ QSPI_CMB_SEQ_ADDR_CFG);
+
+ reinit_completion(&tqspi->xfer_completion);
+ cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
+ is_first_msg);
+ ret = tegra_qspi_start_transfer_one(spi, xfer,
+ cmd1);
+
+ if (ret < 0) {
+ dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
+ ret);
+ return ret;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout
+ (&tqspi->xfer_completion,
+ QSPI_DMA_TIMEOUT);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
+ ret);
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all
+ (tqspi->tx_dma_chan);
+
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all
+ (tqspi->rx_dma_chan);
+
+ /* Abort transfer by resetting pio/dma bit */
+ if (!tqspi->is_curr_dma_xfer) {
+ cmd1 = tegra_qspi_readl
+ (tqspi,
+ QSPI_COMMAND1);
+ cmd1 &= ~QSPI_PIO;
+ tegra_qspi_writel
+ (tqspi, cmd1,
+ QSPI_COMMAND1);
+ } else {
+ dma_ctl = tegra_qspi_readl
+ (tqspi,
+ QSPI_DMA_CTL);
+ dma_ctl &= ~QSPI_DMA_EN;
+ tegra_qspi_writel(tqspi, dma_ctl,
+ QSPI_DMA_CTL);
+ }
+
+ /* Reset controller if timeout happens */
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev,
+ "device reset failed\n");
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ dev_err(tqspi->dev, "QSPI Transfer failed\n");
+ tqspi->tx_status = 0;
+ tqspi->rx_status = 0;
+ ret = -EIO;
+ goto exit;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ transfer_phase++;
+ }
+
+exit:
+ msg->status = ret;
+
+ return ret;
+}
+
+static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct spi_transfer *transfer;
bool is_first_msg = true;
- int ret;
+ int ret = 0, val = 0;
msg->status = 0;
msg->actual_length = 0;
tqspi->tx_status = 0;
tqspi->rx_status = 0;
+ /* Disable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val &= ~QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
list_for_each_entry(transfer, &msg->transfers, transfer_list) {
struct spi_transfer *xfer = transfer;
u8 dummy_bytes = 0;
@@ -1016,7 +1205,6 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi
goto complete_xfer;
}
- is_first_msg = false;
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
@@ -1061,7 +1249,48 @@ complete_xfer:
ret = 0;
exit:
msg->status = ret;
+
+ return ret;
+}
+
+static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ int transfer_count = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ transfer_count++;
+ }
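+ /* combined sequence expects exactly three transfers: command, address and data */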
+ if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
+ return false;
+ xfer = list_first_entry(&msg->transfers, typeof(*xfer),
+ transfer_list);
+ if (xfer->len > 2)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (xfer->len > 4 || xfer->len < 3)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
+ return false;
+
+ return true;
+}
+
+static int tegra_qspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (tegra_qspi_validate_cmb_seq(tqspi, msg))
+ ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
+ else
+ ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
+
spi_finalize_current_message(master);
+
return ret;
}
@@ -1193,15 +1422,58 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
return handle_dma_based_xfer(tqspi);
}
+static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = false,
+};
+
+static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = true,
+};
+
+static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
+ .has_dma = false,
+ .cmb_xfer_capable = true,
+};
+
static const struct of_device_id tegra_qspi_of_match[] = {
- { .compatible = "nvidia,tegra210-qspi", },
- { .compatible = "nvidia,tegra186-qspi", },
- { .compatible = "nvidia,tegra194-qspi", },
+ {
+ .compatible = "nvidia,tegra210-qspi",
+ .data = &tegra210_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra186-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra194-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra234-qspi",
+ .data = &tegra234_qspi_soc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tegra_qspi_acpi_match[] = {
+ {
+ .id = "NVDA1213",
+ .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
+ }, {
+ .id = "NVDA1313",
+ .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
+ }, {
+ .id = "NVDA1413",
+ .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
+#endif
+
static int tegra_qspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -1233,6 +1505,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
tqspi->dev = &pdev->dev;
spin_lock_init(&tqspi->lock);
+ tqspi->soc_data = device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tqspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tqspi->base))
@@ -1240,20 +1513,18 @@ static int tegra_qspi_probe(struct platform_device *pdev)
tqspi->phys = r->start;
qspi_irq = platform_get_irq(pdev, 0);
+ if (qspi_irq < 0)
+ return qspi_irq;
tqspi->irq = qspi_irq;
- tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
- if (IS_ERR(tqspi->clk)) {
- ret = PTR_ERR(tqspi->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
- return ret;
- }
+ if (!has_acpi_companion(tqspi->dev)) {
+ tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
+ if (IS_ERR(tqspi->clk)) {
+ ret = PTR_ERR(tqspi->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ return ret;
+ }
- tqspi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(tqspi->rst)) {
- ret = PTR_ERR(tqspi->rst);
- dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
- return ret;
}
tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
@@ -1277,9 +1548,8 @@ static int tegra_qspi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- reset_control_assert(tqspi->rst);
- udelay(2);
- reset_control_deassert(tqspi->rst);
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
@@ -1358,6 +1628,9 @@ static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ /* Runtime pm disabled with ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
/* flush all write which are in PPSB queue by reading back */
tegra_qspi_readl(tqspi, QSPI_COMMAND1);
@@ -1372,6 +1645,9 @@ static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
int ret;
+ /* Runtime pm disabled with ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
ret = clk_prepare_enable(tqspi->clk);
if (ret < 0)
dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
@@ -1389,6 +1665,7 @@ static struct platform_driver tegra_qspi_driver = {
.name = "tegra-qspi",
.pm = &tegra_qspi_pm_ops,
.of_match_table = tegra_qspi_of_match,
+ .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
},
.probe = tegra_qspi_probe,
.remove = tegra_qspi_remove,
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index f8ad0709d015..a565352f6381 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -288,7 +288,7 @@ static int tle62x0_probe(struct spi_device *spi)
return ret;
}
-static int tle62x0_remove(struct spi_device *spi)
+static void tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
@@ -298,7 +298,6 @@ static int tle62x0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
- return 0;
}
static struct spi_driver tle62x0_driver = {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 8c4615b76339..dfaa1d79a78b 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -103,6 +103,7 @@
static int use_dma = 1;
struct pch_spi_dma_ctrl {
+ struct pci_dev *dma_dev;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
struct pch_dma_slave param_tx;
@@ -876,8 +877,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
- data->use_dma = 0;
- return;
+ goto out;
}
dma->chan_tx = chan;
@@ -893,10 +893,15 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
- data->use_dma = 0;
- return;
+ goto out;
}
dma->chan_rx = chan;
+
+ dma->dma_dev = dma_dev;
+ return;
+out:
+ pci_dev_put(dma_dev);
+ data->use_dma = 0;
}
static void pch_spi_release_dma(struct pch_spi_data *data)
@@ -912,6 +917,8 @@ static void pch_spi_release_dma(struct pch_spi_data *data)
dma_release_channel(dma->chan_rx);
dma->chan_rx = NULL;
}
+
+ pci_dev_put(dma->dma_dev);
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 342ee8d2c476..cc0da4822231 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_tx_burst = caps.max_burst;
}
@@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto out_disable_clk;
+ goto out_release_dma;
}
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
@@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_rx_burst = caps.max_burst;
}
@@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
- goto out_disable_clk;
+ goto out_release_dma;
return 0;
+out_release_dma:
+ if (!IS_ERR_OR_NULL(master->dma_rx)) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (!IS_ERR_OR_NULL(master->dma_tx)) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
out_disable_clk:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index cfa222c9bd5e..78f31b61a2aa 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
if (op->dummy.nbytes) {
tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
memset(tmpbuf, 0xff, op->dummy.nbytes);
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = tmpbuf;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 328b6559bb19..2b5afae8ff7f 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1172,7 +1172,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret)
+ goto clk_dis_all;
+
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4599b121d744..c4dd1200fe99 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -18,7 +18,6 @@
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
@@ -144,7 +143,7 @@ static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
unsigned long flags; \
ssize_t len; \
spin_lock_irqsave(&stat->lock, flags); \
- len = sprintf(buf, format_string, stat->field); \
+ len = sysfs_emit(buf, format_string "\n", stat->field); \
spin_unlock_irqrestore(&stat->lock, flags); \
return len; \
} \
@@ -404,15 +403,8 @@ static void spi_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- if (sdrv->remove) {
- int ret;
-
- ret = sdrv->remove(to_spi_device(dev));
- if (ret)
- dev_warn(dev,
- "Failed to unbind driver (%pe), ignoring\n",
- ERR_PTR(ret));
- }
+ if (sdrv->remove)
+ sdrv->remove(to_spi_device(dev));
dev_pm_domain_detach(dev, true);
}
@@ -532,7 +524,7 @@ static DEFINE_MUTEX(board_lock);
*
* Return: a pointer to the new device, or NULL.
*/
-static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
@@ -549,7 +541,6 @@ static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
- spi->cs_gpio = -ENOENT;
spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -557,6 +548,7 @@ static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
device_initialize(&spi->dev);
return spi;
}
+EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
@@ -612,11 +604,8 @@ static int __spi_add_device(struct spi_device *spi)
return -ENODEV;
}
- /* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
- else if (ctlr->cs_gpios)
- spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/*
* Drivers may modify this initial i/o setup, but will
@@ -652,7 +641,7 @@ static int __spi_add_device(struct spi_device *spi)
*
* Return: 0 on success; negative errno on failure
*/
-static int spi_add_device(struct spi_device *spi)
+int spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
@@ -673,6 +662,7 @@ static int spi_add_device(struct spi_device *spi)
mutex_unlock(&ctlr->add_lock);
return status;
}
+EXPORT_SYMBOL_GPL(spi_add_device);
static int spi_add_device_locked(struct spi_device *spi)
{
@@ -936,48 +926,40 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && (spi->controller->last_cs_enable == enable) &&
+ if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
+ (!enable && spi->controller->last_cs != spi->chip_select)) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs_enable = enable;
+ spi->controller->last_cs = enable ? spi->chip_select : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
- !spi->controller->set_cs_timing) && !activate) {
+ if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
spi_delay_exec(&spi->cs_hold, NULL);
}
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ if (spi->cs_gpiod) {
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod) {
- /*
- * Historically ACPI has no means of the GPIO polarity and
- * thus the SPISerialBus() resource defines it on the per-chip
- * basis. In order to avoid a chain of negations, the GPIO
- * polarity is considered being Active High. Even for the cases
- * when _DSD() is involved (in the updated versions of ACPI)
- * the GPIO CS polarity must be defined Active High to avoid
- * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
- * into account.
- */
- if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
- else
- /* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
- } else {
- /*
- * Invert the enable line, as active low is
- * default for SPI.
- */
- gpio_set_value_cansleep(spi->cs_gpio, !enable);
- }
+ /*
+ * Historically ACPI has no means of the GPIO polarity and
+ * thus the SPISerialBus() resource defines it on the per-chip
+ * basis. In order to avoid a chain of negations, the GPIO
+ * polarity is considered being Active High. Even for the cases
+ * when _DSD() is involved (in the updated versions of ACPI)
+ * the GPIO CS polarity must be defined Active High to avoid
+ * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+ * into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -987,8 +969,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->set_cs(spi, !enable);
}
- if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
- !spi->controller->set_cs_timing) {
+ if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
@@ -1019,10 +1000,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
int i, ret;
if (vmalloced_buf || kmap_buf) {
- desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -2318,8 +2299,50 @@ struct acpi_spi_lookup {
int irq;
u8 bits_per_word;
u8 chip_select;
+ int n;
+ int index;
};
+static int acpi_spi_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_spi_serialbus *sb;
+ int *count = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
+ return 1;
+
+ *count = *count + 1;
+
+ return 1;
+}
+
+/**
+ * acpi_spi_count_resources - Count the number of SpiSerialBus resources
+ * @adev: ACPI device
+ *
+ * Return: the number of SpiSerialBus resources in the ACPI device's
+ * resource list, or a negative error code.
+ */
+int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ LIST_HEAD(r);
+ int count = 0;
+ int ret;
+
+ ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
+
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
struct acpi_spi_lookup *lookup)
{
@@ -2349,6 +2372,8 @@ static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
lookup->mode |= SPI_CPHA;
}
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct acpi_spi_lookup *lookup = data;
@@ -2362,14 +2387,35 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ if (lookup->index == -1 && !ctlr)
+ return -ENODEV;
+
status = acpi_get_handle(NULL,
sb->resource_source.string_ptr,
&parent_handle);
- if (ACPI_FAILURE(status) ||
- ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ if (ACPI_FAILURE(status))
return -ENODEV;
+ if (ctlr) {
+ if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ return -ENODEV;
+ } else {
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(parent_handle, &adev))
+ return -ENODEV;
+
+ ctlr = acpi_spi_find_controller_by_adev(adev);
+ if (!ctlr)
+ return -ENODEV;
+
+ lookup->ctlr = ctlr;
+ }
+
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
@@ -2408,8 +2454,25 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return 1;
}
-static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
- struct acpi_device *adev)
+/**
+ * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
+ * @ctlr: controller to which the spi device belongs
+ * @adev: ACPI Device for the spi device
+ * @index: Index of the spi resource inside the ACPI Node
+ *
+ * This should be used to allocate a new spi device from an ACPI Node.
+ * The caller is responsible for calling spi_add_device to register the spi device.
+ *
+ * If ctlr is set to NULL, the Controller for the spi device will be looked up
+ * using the resource.
+ * If index is set to -1, index is not used.
+ * Note: If index is -1, ctlr must be set.
+ *
+ * Return: a pointer to the new device, or ERR_PTR on error.
+ */
+struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
{
acpi_handle parent_handle = NULL;
struct list_head resource_list;
@@ -2417,12 +2480,13 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct spi_device *spi;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
+ if (!ctlr && index == -1)
+ return ERR_PTR(-EINVAL);
lookup.ctlr = ctlr;
lookup.irq = -1;
+ lookup.index = index;
+ lookup.n = 0;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
@@ -2431,26 +2495,25 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
if (ret < 0)
/* found SPI in _CRS but it points to another controller */
- return AE_OK;
+ return ERR_PTR(-ENODEV);
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
- ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
+ ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
}
if (!lookup.max_speed_hz)
- return AE_OK;
+ return ERR_PTR(-ENODEV);
- spi = spi_alloc_device(ctlr);
+ spi = spi_alloc_device(lookup.ctlr);
if (!spi) {
- dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
+ dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
- return AE_NO_MEMORY;
+ return ERR_PTR(-ENOMEM);
}
-
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
@@ -2458,6 +2521,27 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
spi->bits_per_word = lookup.bits_per_word;
spi->chip_select = lookup.chip_select;
+ return spi;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
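/*
 * Illustrative sketch (not part of this patch): allocating the spi_device
 * described by the index-th SpiSerialBus resource of an ACPI node and then
 * registering it, as the kernel-doc above requires. The function name and
 * the origin of "adev"/"index" are assumptions for illustration only.
 */
static int example_add_acpi_spi_device(struct acpi_device *adev, int index)
{
	struct spi_device *spi;
	int ret;

	/* ctlr == NULL: the controller is looked up from the resource */
	spi = acpi_spi_device_alloc(NULL, adev, index);
	if (IS_ERR(spi))
		return PTR_ERR(spi);

	ret = spi_add_device(spi);
	if (ret)
		spi_dev_put(spi);	/* drop the reference taken by the alloc */

	return ret;
}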
+
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
+ struct acpi_device *adev)
+{
+ struct spi_device *spi;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ spi = acpi_spi_device_alloc(ctlr, adev, -1);
+ if (IS_ERR(spi)) {
+ if (PTR_ERR(spi) == -ENOMEM)
+ return AE_NO_MEMORY;
+ else
+ return AE_OK;
+ }
+
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
@@ -2480,10 +2564,10 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct spi_controller *ctlr = data;
- struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ if (!adev)
return AE_OK;
return acpi_register_spi_device(ctlr, adev);
@@ -2729,46 +2813,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
-#ifdef CONFIG_OF
-static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
-{
- int nb, i, *cs;
- struct device_node *np = ctlr->dev.of_node;
-
- if (!np)
- return 0;
-
- nb = of_gpio_named_count(np, "cs-gpios");
- ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
-
- /* Return error only for an incorrectly formed cs-gpios property */
- if (nb == 0 || nb == -ENOENT)
- return 0;
- else if (nb < 0)
- return nb;
-
- cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
- GFP_KERNEL);
- ctlr->cs_gpios = cs;
-
- if (!ctlr->cs_gpios)
- return -ENOMEM;
-
- for (i = 0; i < ctlr->num_chipselect; i++)
- cs[i] = -ENOENT;
-
- for (i = 0; i < nb; i++)
- cs[i] = of_get_named_gpio(np, "cs-gpios", i);
-
- return 0;
-}
-#else
-static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
-{
- return 0;
-}
-#endif
-
/**
* spi_get_gpio_descs() - grab chip select GPIOs for the master
* @ctlr: The SPI master to grab GPIO descriptors for
@@ -2953,22 +2997,15 @@ int spi_register_controller(struct spi_controller *ctlr)
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
- if (!spi_controller_is_slave(ctlr)) {
- if (ctlr->use_gpio_descriptors) {
- status = spi_get_gpio_descs(ctlr);
- if (status)
- goto free_bus_id;
- /*
- * A controller using GPIO descriptors always
- * supports SPI_CS_HIGH if need be.
- */
- ctlr->mode_bits |= SPI_CS_HIGH;
- } else {
- /* Legacy code path for GPIOs from DT */
- status = of_spi_get_gpio_numbers(ctlr);
- if (status)
- goto free_bus_id;
- }
+ if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ goto free_bus_id;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
}
/*
@@ -2980,6 +3017,9 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
+ /* setting last_cs to -1 means no chip selected */
+ ctlr->last_cs = -1;
+
status = device_add(&ctlr->dev);
if (status < 0)
goto free_bus_id;
@@ -3457,12 +3497,6 @@ int spi_setup(struct spi_device *spi)
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
SPI_NO_TX | SPI_NO_RX);
- /*
- * Nothing prevents from working with active-high CS in case if it
- * is driven by GPIO.
- */
- if (gpio_is_valid(spi->cs_gpio))
- bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
@@ -3588,8 +3622,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi->cs_gpiod ||
- gpio_is_valid(spi->cs_gpio))) {
+ spi->cs_gpiod)) {
size_t maxsize;
int ret;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index a5cceca8b82b..53a551714265 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -453,22 +453,29 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
dev_dbg(&spi->dev, "%d bits per word\n", tmp);
}
break;
- case SPI_IOC_WR_MAX_SPEED_HZ:
+ case SPI_IOC_WR_MAX_SPEED_HZ: {
+ u32 save;
+
retval = get_user(tmp, (__u32 __user *)arg);
- if (retval == 0) {
- u32 save = spi->max_speed_hz;
+ if (retval)
+ break;
+ if (tmp == 0) {
+ retval = -EINVAL;
+ break;
+ }
- spi->max_speed_hz = tmp;
- retval = spi_setup(spi);
- if (retval == 0) {
- spidev->speed_hz = tmp;
- dev_dbg(&spi->dev, "%d Hz (max)\n",
- spidev->speed_hz);
- }
- spi->max_speed_hz = save;
+ save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval == 0) {
+ spidev->speed_hz = tmp;
+ dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
}
- break;
+ spi->max_speed_hz = save;
+ break;
+ }
default:
/* segmented and/or full-duplex I/O request */
/* Check message and copy into scratch area */
@@ -803,7 +810,7 @@ static int spidev_probe(struct spi_device *spi)
return status;
}
-static int spidev_remove(struct spi_device *spi)
+static void spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -820,8 +827,6 @@ static int spidev_remove(struct spi_device *spi)
if (spidev->users == 0)
kfree(spidev);
mutex_unlock(&device_list_lock);
-
- return 0;
}
static struct spi_driver spidev_spi_driver = {
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index abe9395a0aef..861a154144e6 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
{
int rc;
+ par->fbtftops.reset(par);
+
rc = init_tearing_effect_line(par);
if (rc)
return rc;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 4cdec34e23d2..b68f5f9b7c78 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -272,21 +272,39 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+#define FBTFT_DT_TABLE(_compatible) \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _compatible }, \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, dt_ids);
+
+#define FBTFT_SPI_DRIVER(_name, _compatible, _display, _spi_ids) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static void fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ fbtft_remove_common(&spi->dev, info); \
+} \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .of_match_table = dt_ids, \
+ }, \
+ .id_table = _spi_ids, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+};
+
#define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \
\
-static int fbtft_driver_probe_spi(struct spi_device *spi) \
-{ \
- return fbtft_probe_common(_display, spi, NULL); \
-} \
- \
-static int fbtft_driver_remove_spi(struct spi_device *spi) \
-{ \
- struct fb_info *info = spi_get_drvdata(spi); \
- \
- fbtft_remove_common(&spi->dev, info); \
- return 0; \
-} \
- \
static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
{ \
return fbtft_probe_common(_display, NULL, pdev); \
@@ -300,22 +318,9 @@ static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
return 0; \
} \
\
-static const struct of_device_id dt_ids[] = { \
- { .compatible = _compatible }, \
- {}, \
-}; \
- \
-MODULE_DEVICE_TABLE(of, dt_ids); \
+FBTFT_DT_TABLE(_compatible) \
\
- \
-static struct spi_driver fbtft_driver_spi_driver = { \
- .driver = { \
- .name = _name, \
- .of_match_table = dt_ids, \
- }, \
- .probe = fbtft_driver_probe_spi, \
- .remove = fbtft_driver_remove_spi, \
-}; \
+FBTFT_SPI_DRIVER(_name, _compatible, _display, NULL) \
\
static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
@@ -334,7 +339,10 @@ static int __init fbtft_driver_module_init(void) \
ret = spi_register_driver(&fbtft_driver_spi_driver); \
if (ret < 0) \
return ret; \
- return platform_driver_register(&fbtft_driver_platform_driver); \
+ ret = platform_driver_register(&fbtft_driver_platform_driver); \
+ if (ret < 0) \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ return ret; \
} \
\
static void __exit fbtft_driver_module_exit(void) \
@@ -348,42 +356,15 @@ module_exit(fbtft_driver_module_exit);
#define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \
\
-static int fbtft_driver_probe_spi(struct spi_device *spi) \
-{ \
- return fbtft_probe_common(_display, spi, NULL); \
-} \
- \
-static int fbtft_driver_remove_spi(struct spi_device *spi) \
-{ \
- struct fb_info *info = spi_get_drvdata(spi); \
- \
- fbtft_remove_common(&spi->dev, info); \
- return 0; \
-} \
- \
-static const struct of_device_id dt_ids[] = { \
- { .compatible = _comp_vend "," _comp_dev }, \
- {}, \
-}; \
- \
-MODULE_DEVICE_TABLE(of, dt_ids); \
+FBTFT_DT_TABLE(_comp_vend "," _comp_dev) \
\
static const struct spi_device_id spi_ids[] = { \
{ .name = _comp_dev }, \
{}, \
}; \
- \
MODULE_DEVICE_TABLE(spi, spi_ids); \
\
-static struct spi_driver fbtft_driver_spi_driver = { \
- .driver = { \
- .name = _name, \
- .of_match_table = dt_ids, \
- }, \
- .id_table = spi_ids, \
- .probe = fbtft_driver_probe_spi, \
- .remove = fbtft_driver_remove_spi, \
-}; \
+FBTFT_SPI_DRIVER(_name, _comp_vend "," _comp_dev, _display, spi_ids) \
\
module_spi_driver(fbtft_driver_spi_driver);
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 493ed4821515..0d8d8fed283d 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -76,14 +76,15 @@ static void tx_complete(void *arg)
static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
{
- int ret;
+ int ret, len;
+ len = skb->len + ETH_HLEN;
ret = netif_rx_ni(skb);
if (ret == NET_RX_DROP) {
nic->stats.rx_dropped++;
} else {
nic->stats.rx_packets++;
- nic->stats.rx_bytes += skb->len + ETH_HLEN;
+ nic->stats.rx_bytes += len;
}
return 0;
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 7e6347fe93f9..8a7cf1d0e968 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -391,10 +391,7 @@ static int gb_gpio_request_handler(struct gb_operation *op)
return -EINVAL;
}
- local_irq_disable();
- ret = generic_handle_irq(irq);
- local_irq_enable();
-
+ ret = generic_handle_irq_safe(irq);
if (ret)
dev_err(dev, "failed to invoke irq handler\n");
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 68c09fa016ed..1d31c35875e3 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1264,7 +1264,7 @@ RX_failed:
return retval;
}
-static int pi433_remove(struct spi_device *spi)
+static void pi433_remove(struct spi_device *spi)
{
struct pi433_device *device = spi_get_drvdata(spi);
@@ -1284,8 +1284,6 @@ static int pi433_remove(struct spi_device *spi)
kfree(device->rx_buffer);
kfree(device);
-
- return 0;
}
static const struct of_device_id pi433_dt_ids[] = {
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 0f82f5031c43..49a3f45cb771 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5907,6 +5907,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
/* for BC/MC Frames */
@@ -5917,7 +5918,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
msleep(10);/* 10ms, ATIM(HIQ) Windows */
- spin_lock_bh(&psta_bmc->sleep_q.lock);
+ /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -5940,7 +5942,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
}
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
/* check hi queue and bmc_sleepq */
rtw_chk_hi_queue_cmd(padapter);
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 41bfca549c64..105fe0e3482a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -957,8 +957,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&psta->sleep_q.lock);
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -989,10 +991,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
}
- spin_unlock_bh(&psta->sleep_q.lock);
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
} else {
- spin_unlock_bh(&psta->sleep_q.lock);
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
if (pstapriv->tim_bitmap&BIT(psta->aid)) {
if (psta->sleepq_len == 0) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 0c9ea1520fd0..beb11d89db18 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -293,48 +293,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
/* list_del_init(&psta->wakeup_list); */
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
+
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
psta->sleepq_len = 0;
- spin_unlock_bh(&psta->sleep_q.lock);
-
- spin_lock_bh(&pxmitpriv->lock);
/* vo */
- spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
phwxmit = pxmitpriv->hwxmits;
phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
pstaxmitpriv->vo_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
/* vi */
- spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+1;
phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
pstaxmitpriv->vi_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
/* be */
- spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
list_del_init(&(pstaxmitpriv->be_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+2;
phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
pstaxmitpriv->be_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
/* bk */
- spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+3;
phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
pstaxmitpriv->bk_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
spin_unlock_bh(&pxmitpriv->lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 13b8bd5ffabc..f466bfd248fb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1734,12 +1734,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead, *tmp;
struct xmit_frame *pxmitframe;
+ spin_lock_bh(&pframequeue->lock);
+
phead = get_list_head(pframequeue);
list_for_each_safe(plist, tmp, phead) {
pxmitframe = list_entry(plist, struct xmit_frame, list);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
+ spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1794,7 +1797,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct sta_info *psta;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct xmit_priv *xmit_priv = &padapter->xmitpriv;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
signed int res = _SUCCESS;
@@ -1812,14 +1814,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
- spin_lock_bh(&xmit_priv->lock);
if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
- spin_unlock_bh(&xmit_priv->lock);
exit:
@@ -2202,10 +2202,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2306,7 +2307,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
_exit:
- spin_unlock_bh(&psta->sleep_q.lock);
+ spin_unlock_bh(&pxmitpriv->lock);
if (update_mask)
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
@@ -2318,8 +2319,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2372,7 +2374,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
}
}
- spin_unlock_bh(&psta->sleep_q.lock);
+ spin_unlock_bh(&pxmitpriv->lock);
}
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index b5d5e922231c..15810438a472 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -502,7 +502,9 @@ s32 rtl8723bs_hal_xmit(
rtw_issue_addbareq_cmd(padapter, pxmitframe);
}
+ spin_lock_bh(&pxmitpriv->lock);
err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+ spin_unlock_bh(&pxmitpriv->lock);
if (err != _SUCCESS) {
rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index c94fa7d8d5a9..1b343b434f4d 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -102,13 +102,17 @@ there are several "locks" in mlme_priv,
since mlme_priv is a shared resource between many threads,
like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
Each struct __queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
+Other items in mlme_priv are protected by mlme_priv.lock, while items in
+xmit_priv are protected by xmit_priv.lock.
To avoid possible deadlock, any thread trying to modify mlme_priv
SHALL not hold more than one lock at a time!
+The only exception is that queue functions which take __queue.lock
+may be called with xmit_priv.lock held. In this case the order
+MUST always be: first take xmit_priv.lock, then call any queue function
+that takes __queue.lock (a short example follows this comment).
*/
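/*
 * Illustrative sketch (not part of this patch) of the lock order documented
 * above: xmit_priv.lock is taken first, and only then is a queue helper such
 * as rtw_free_xmitframe_queue() called, which takes __queue.lock internally.
 * Taking the locks in the opposite order would risk deadlock. The function
 * name example_flush_sleep_q() is hypothetical.
 */
static void example_flush_sleep_q(struct adapter *padapter, struct sta_info *psta)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	spin_lock_bh(&pxmitpriv->lock);		/* outer lock: xmit_priv.lock */
	rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); /* takes __queue.lock */
	psta->sleepq_len = 0;
	spin_unlock_bh(&pxmitpriv->lock);
}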
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 6759a6261500..3a2e4582db8e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (WARN_ON(!service))
+ if (WARN_ON(!service)) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
user_service = (struct user_service *)service->base.userdata;
instance = user_service->instance;
- if (!instance || instance->closing)
+ if (!instance || instance->closing) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
+
+ /*
+ * Since we are hopping between different synchronization mechanisms,
+ * taking an extra reference results in a simpler implementation.
+ */
+ vchiq_service_get(service);
+ rcu_read_unlock();
vchiq_log_trace(vchiq_arm_log_level,
"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
@@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
bulk_userdata);
if (status != VCHIQ_SUCCESS) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return status;
}
}
@@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
if (wait_for_completion_interruptible(&user_service->remove_event)) {
vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_RETRY;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
header = NULL;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
if (skip_completion)
return VCHIQ_SUCCESS;
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index 55ffcd7c42e2..fa0ff66a457d 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -232,12 +232,11 @@ static int wfx_spi_probe(struct spi_device *func)
return wfx_probe(bus->core);
}
-static int wfx_spi_remove(struct spi_device *func)
+static void wfx_spi_remove(struct spi_device *func)
{
struct wfx_spi_priv *bus = spi_get_drvdata(func);
wfx_release(bus->core);
- return 0;
}
/* For dynamic driver binding, kernel does not use OF to match driver. It only
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 8075f60fd02c..2d5cf1714ae0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal(
break;
}
spin_unlock(&tpg->tpg_np_lock);
+
+ if (match)
+ break;
}
spin_unlock(&tiqn->tiqn_tpg_lock);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bf8ae4825a06..87ede165ddba 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -353,18 +352,16 @@ static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
- bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
- &ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
+ GFP_NOIO, &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
- bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
- bio->bi_opf = opf;
return bio;
}
@@ -418,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_KERNEL);
bio->bi_end_io = iblock_end_io_flush;
- bio_set_dev(bio, ib_dev->ibd_bd);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 807d06ecadee..0fae71ac5cc8 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -17,7 +17,6 @@
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 1ca320885fad..17a6f51d3089 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee)
optee_unregister_devices();
optee_notif_uninit(optee);
+ teedev_close_context(optee->ctx);
/*
* The two devices have to be unregistered before we can free the
* other resources.
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 20a1b1a3d965..f744ab15bf2c 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
*/
static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_shm *shm;
@@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
@@ -493,14 +494,13 @@ err_bad_param:
}
static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
- struct optee *optee = tee_get_drvdata(ctx->teedev);
-
arg->ret_origin = TEEC_ORIGIN_COMMS;
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
- handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
@@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
}
}
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
- struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
{
switch (cmd) {
case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
- handle_ffa_rpc_func_cmd(ctx, arg);
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
break;
case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
/* Interrupt delivered by now */
@@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* above.
*/
cond_resched();
- optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
data->data0 = cmd;
data->data1 = 0;
@@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
.data2 = (u32)(shm->sec_world_id >> 32),
.data3 = shm->offset,
};
- struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
- unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
- struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
return optee_ffa_yielding_call(ctx, &data, rpc_arg);
}
@@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_dev_ops *ffa_ops;
unsigned int rpc_arg_count;
+ struct tee_shm_pool *pool;
struct tee_device *teedev;
+ struct tee_context *ctx;
struct optee *optee;
int rc;
@@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (!optee)
return -ENOMEM;
- optee->pool = optee_ffa_config_dyn_shm();
- if (IS_ERR(optee->pool)) {
- rc = PTR_ERR(optee->pool);
- optee->pool = NULL;
- goto err;
+ pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
}
+ optee->pool = pool;
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
@@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_pool;
}
optee->teedev = teedev;
@@ -837,50 +848,59 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
- rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_rhashtable_free;
}
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc)
+ goto err_close_ctx;
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_unregister_devices;
pr_info("initialized driver\n");
return 0;
-err:
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
tee_device_unregister(optee->teedev);
- if (optee->pool)
- tee_shm_pool_free(optee->pool);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
kfree(optee);
return rc;
}
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index a28fa03dcd0e..05212842b0a5 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key)
void optee_notif_uninit(struct optee *optee)
{
- kfree(optee->notif.bitmap);
+ bitmap_free(optee->notif.bitmap);
}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 46f74ab07c7e..92bc47bef95f 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,7 +53,6 @@ struct optee_call_queue {
struct optee_notif {
u_int max_key;
- struct tee_context *ctx;
/* Serializes access to the elements below in this struct */
spinlock_t lock;
struct list_head db;
@@ -134,9 +133,10 @@ struct optee_ops {
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @teedev: client device
* @ops: internal callbacks for different ways to reach secure
* world
- * @teedev: client device
+ * @ctx: driver internal TEE context
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
@@ -152,6 +152,7 @@ struct optee {
struct tee_device *supp_teedev;
struct tee_device *teedev;
const struct optee_ops *ops;
+ struct tee_context *ctx;
union {
struct optee_smc smc;
struct optee_ffa ffa;
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 449d6a72d289..c517d310249f 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
-
return 0;
}
@@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
@@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(optee->ctx, sz,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
+ shm = tee_shm_alloc(optee->ctx, param->a1,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(&param->a1, &param->a2, pa);
@@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->notif.ctx);
+ optee_smc_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
- struct tee_context *ctx;
int rc;
- ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- optee->notif.ctx = ctx;
rc = request_threaded_irq(irq, notif_irq_handler,
notif_irq_thread_fn,
0, "optee_notification", optee);
if (rc)
- goto err_close_ctx;
+ return rc;
optee->smc.notif_irq = irq;
return 0;
-
-err_close_ctx:
- teedev_close_context(optee->notif.ctx);
- optee->notif.ctx = NULL;
-
- return rc;
}
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
- if (optee->notif.ctx) {
- optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
free_irq(optee->smc.notif_irq, optee);
irq_dispose_mapping(optee->smc.notif_irq);
}
-
- /*
- * The thread normally working with optee->notif.ctx was
- * stopped with free_irq() above.
- *
- * Note we're not using teedev_close_context() or
- * tee_client_close_context() since we have already called
- * tee_device_put() while initializing to avoid a circular
- * reference counting.
- */
- teedev_close_context(optee->notif.ctx);
}
}
@@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ struct tee_context *ctx;
u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1446,9 +1416,15 @@ static int optee_probe(struct platform_device *pdev)
optee->pool = pool;
platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_supp_uninit;
+ }
+ optee->ctx = ctx;
rc = optee_notif_init(optee, max_notif_value);
if (rc)
- goto err_supp_uninit;
+ goto err_close_ctx;
if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
unsigned int irq;
@@ -1496,6 +1472,8 @@ err_disable_shm_cache:
optee_unregister_devices();
err_notif_uninit:
optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
err_supp_uninit:
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 8df5edef1ded..0cedb8b4f00a 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -351,7 +351,7 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->thermal = thermal;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
brcmstb_tmon_irq_thread,
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index c83ea5d04a1d..f0c845679250 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -99,3 +99,17 @@ config INTEL_MENLOW
Intel Menlow platform.
If unsure, say N.
+
+config INTEL_HFI_THERMAL
+ bool "Intel Hardware Feedback Interface"
+ depends on NET
+ depends on CPU_SUP_INTEL
+ depends on X86_THERMAL_VECTOR
+ select THERMAL_NETLINK
+ help
+ Select this option to enable the Hardware Feedback Interface. If
+ selected, hardware provides guidance to the operating system on
+ the performance and energy efficiency capabilities of each CPU.
+ These capabilities may change as a result of changes in the operating
+ conditions of the system such as power and thermal limits. If selected,
+ the kernel relays updates in CPUs' capabilities to userspace.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index 960b56268b4a..9a8d8054f316 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
+obj-$(CONFIG_INTEL_HFI_THERMAL) += intel_hfi.o
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index e90690a234c4..01b80331eab6 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -72,7 +72,6 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
int i;
int nr_bad_entries = 0;
struct trt *trts;
- struct acpi_device *adev;
union acpi_object *p;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer element = { 0, NULL };
@@ -112,12 +111,10 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
if (!create_dev)
continue;
- result = acpi_bus_get_device(trt->source, &adev);
- if (result)
+ if (!acpi_fetch_acpi_dev(trt->source))
pr_warn("Failed to get source ACPI device\n");
- result = acpi_bus_get_device(trt->target, &adev);
- if (result)
+ if (!acpi_fetch_acpi_dev(trt->target))
pr_warn("Failed to get target ACPI device\n");
}
@@ -149,7 +146,6 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
int i;
int nr_bad_entries = 0;
struct art *arts;
- struct acpi_device *adev;
union acpi_object *p;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer element = { 0, NULL };
@@ -191,16 +187,11 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (!create_dev)
continue;
- if (art->source) {
- result = acpi_bus_get_device(art->source, &adev);
- if (result)
- pr_warn("Failed to get source ACPI device\n");
- }
- if (art->target) {
- result = acpi_bus_get_device(art->target, &adev);
- if (result)
- pr_warn("Failed to get target ACPI device\n");
- }
+ if (!acpi_fetch_acpi_dev(art->source))
+ pr_warn("Failed to get source ACPI device\n");
+
+ if (!acpi_fetch_acpi_dev(art->target))
+ pr_warn("Failed to get target ACPI device\n");
}
*artp = arts;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 72acb1f61849..4954800b9850 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -17,8 +17,8 @@
#define INT3400_KEEP_ALIVE 0xA0
enum int3400_thermal_uuid {
+ INT3400_THERMAL_ACTIVE = 0,
INT3400_THERMAL_PASSIVE_1,
- INT3400_THERMAL_ACTIVE,
INT3400_THERMAL_CRITICAL,
INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
INT3400_THERMAL_EMERGENCY_CALL_MODE,
@@ -31,8 +31,8 @@ enum int3400_thermal_uuid {
};
static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
- "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
+ "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
"63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
"5349962F-71E6-431D-9AE8-0A635B710AEE",
@@ -53,12 +53,13 @@ struct int3400_thermal_priv {
struct art *arts;
int trt_count;
struct trt *trts;
- u8 uuid_bitmap;
+ u32 uuid_bitmap;
int rel_misc_dev_res;
int current_uuid_index;
char *data_vault;
int odvp_count;
int *odvp;
+ u32 os_uuid_mask;
struct odvp_attr *odvp_attrs;
};
@@ -142,12 +143,55 @@ static ssize_t current_uuid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ int i, length = 0;
- if (priv->current_uuid_index == -1)
- return sprintf(buf, "INVALID\n");
+ if (priv->current_uuid_index > 0)
+ return sprintf(buf, "%s\n",
+ int3400_thermal_uuids[priv->current_uuid_index]);
- return sprintf(buf, "%s\n",
- int3400_thermal_uuids[priv->current_uuid_index]);
+ for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) {
+ if (priv->os_uuid_mask & BIT(i))
+ length += scnprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
+ }
+
+ if (length)
+ return length;
+
+ return sprintf(buf, "INVALID\n");
+}
+
+static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enable)
+{
+ u32 ret, buf[2];
+ acpi_status status;
+ int result = 0;
+ struct acpi_osc_context context = {
+ .uuid_str = NULL,
+ .rev = 1,
+ .cap.length = 8,
+ };
+
+ context.uuid_str = uuid_str;
+
+ buf[OSC_QUERY_DWORD] = 0;
+ buf[OSC_SUPPORT_DWORD] = *enable;
+
+ context.cap.pointer = buf;
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_SUCCESS(status)) {
+ ret = *((u32 *)(context.ret.pointer + 4));
+ if (ret != *enable)
+ result = -EPERM;
+ } else
+ result = -EPERM;
+
+ kfree(context.ret.pointer);
+
+ return result;
}
static ssize_t current_uuid_store(struct device *dev,
@@ -164,16 +208,47 @@ static ssize_t current_uuid_store(struct device *dev,
* If we have a list of supported UUIDs, make sure
* this one is supported.
*/
- if (priv->uuid_bitmap &&
- !(priv->uuid_bitmap & (1 << i)))
+ if (priv->uuid_bitmap & BIT(i)) {
+ priv->current_uuid_index = i;
+ return count;
+ }
+
+ /*
+ * Only 3 policies are supported via the new
+ * _OSC used to inform the platform of OS capabilities:
+ * INT3400_THERMAL_ACTIVE
+ * INT3400_THERMAL_PASSIVE_1
+ * INT3400_THERMAL_CRITICAL
+ */
+
+ if (i > INT3400_THERMAL_CRITICAL)
return -EINVAL;
- priv->current_uuid_index = i;
- return count;
+ priv->os_uuid_mask |= BIT(i);
+
+ break;
}
}
- return -EINVAL;
+ if (priv->os_uuid_mask) {
+ int cap, ret;
+
+ /*
+ * Capability bits:
+ * Bit 0: set to 1 to indicate DPTF is active
+ * Bit 1: set to 1 if active cooling is supported by the user space daemon
+ * Bit 2: set to 1 if passive cooling is supported by the user space daemon
+ * Bit 3: set to 1 if the critical trip is handled by the user space daemon
+ */
+ cap = ((priv->os_uuid_mask << 1) | 0x01);
+ ret = int3400_thermal_run_osc(priv->adev->handle,
+ "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
+ &cap);
+ if (ret)
+ return ret;
+ }
+
+ return count;
}
static DEVICE_ATTR_RW(current_uuid);
@@ -236,41 +311,6 @@ end:
return result;
}
-static int int3400_thermal_run_osc(acpi_handle handle,
- enum int3400_thermal_uuid uuid, bool enable)
-{
- u32 ret, buf[2];
- acpi_status status;
- int result = 0;
- struct acpi_osc_context context = {
- .uuid_str = NULL,
- .rev = 1,
- .cap.length = 8,
- };
-
- if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
- return -EINVAL;
-
- context.uuid_str = int3400_thermal_uuids[uuid];
-
- buf[OSC_QUERY_DWORD] = 0;
- buf[OSC_SUPPORT_DWORD] = enable;
-
- context.cap.pointer = buf;
-
- status = acpi_run_osc(handle, &context);
- if (ACPI_SUCCESS(status)) {
- ret = *((u32 *)(context.ret.pointer + 4));
- if (ret != enable)
- result = -EPERM;
- } else
- result = -EPERM;
-
- kfree(context.ret.pointer);
-
- return result;
-}
-
static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
@@ -404,6 +444,10 @@ static void int3400_notify(acpi_handle handle,
thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
thermal_prop[4] = NULL;
kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+ kfree(thermal_prop[0]);
+ kfree(thermal_prop[1]);
+ kfree(thermal_prop[2]);
+ kfree(thermal_prop[3]);
}
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
@@ -422,10 +466,18 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
if (!priv)
return -EINVAL;
- if (mode != thermal->mode)
+ if (mode != thermal->mode) {
+ int enabled;
+
+ if (priv->current_uuid_index < 0 ||
+ priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
+ return -EINVAL;
+
+ enabled = (mode == THERMAL_DEVICE_ENABLED);
result = int3400_thermal_run_osc(priv->adev->handle,
- priv->current_uuid_index,
- mode == THERMAL_DEVICE_ENABLED);
+ int3400_thermal_uuids[priv->current_uuid_index],
+ &enabled);
+ }
evaluate_odvp(priv);
@@ -464,6 +516,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
+ if (!priv->data_vault) {
+ kfree(buffer.pointer);
+ return;
+ }
+
bin_attr_data_vault.private = priv->data_vault;
bin_attr_data_vault.size = obj->package.elements[0].buffer.length;
kfree(buffer.pointer);
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
new file mode 100644
index 000000000000..730fd121df6e
--- /dev/null
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hardware Feedback Interface Driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ *
+ * Authors: Aubrey Li <aubrey.li@linux.intel.com>
+ * Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+ *
+ *
+ * The Hardware Feedback Interface provides performance and energy efficiency
+ * capability information for each CPU in the system. Depending on the processor
+ * model, hardware may periodically update these capabilities as a result of
+ * changes in the operating conditions (e.g., power limits or thermal
+ * constraints). On other processor models, there is a single HFI update
+ * at boot.
+ *
+ * This file provides functionality to process HFI updates and relay these
+ * updates to userspace.
+ */
+
+#define pr_fmt(fmt) "intel-hfi: " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpufeature.h>
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/percpu-defs.h>
+#include <linux/printk.h>
+#include <linux/processor.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/topology.h>
+#include <linux/workqueue.h>
+
+#include <asm/msr.h>
+
+#include "../thermal_core.h"
+#include "intel_hfi.h"
+
+#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \
+ BIT(9) | BIT(11) | BIT(26))
+
+/* Hardware Feedback Interface MSR configuration bits */
+#define HW_FEEDBACK_PTR_VALID_BIT BIT(0)
+#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0)
+
+/* CPUID detection and enumeration definitions for HFI */
+
+#define CPUID_HFI_LEAF 6
+
+union hfi_capabilities {
+ struct {
+ u8 performance:1;
+ u8 energy_efficiency:1;
+ u8 __reserved:6;
+ } split;
+ u8 bits;
+};
+
+union cpuid6_edx {
+ struct {
+ union hfi_capabilities capabilities;
+ u32 table_pages:4;
+ u32 __reserved:4;
+ s32 index:16;
+ } split;
+ u32 full;
+};
+
+/**
+ * struct hfi_cpu_data - HFI capabilities per CPU
+ * @perf_cap: Performance capability
+ * @ee_cap: Energy efficiency capability
+ *
+ * Capabilities of a logical processor in the HFI table. These capabilities are
+ * unitless.
+ */
+struct hfi_cpu_data {
+ u8 perf_cap;
+ u8 ee_cap;
+} __packed;
+
+/**
+ * struct hfi_hdr - Header of the HFI table
+ * @perf_updated: Hardware updated performance capabilities
+ * @ee_updated: Hardware updated energy efficiency capabilities
+ *
+ * Properties of the data in an HFI table.
+ */
+struct hfi_hdr {
+ u8 perf_updated;
+ u8 ee_updated;
+} __packed;
+
+/**
+ * struct hfi_instance - Representation of an HFI instance (i.e., a table)
+ * @local_table: Base of the local copy of the HFI table
+ * @timestamp: Timestamp of the last update of the local table.
+ * Located at the base of the local table.
+ * @hdr: Base address of the header of the local table
+ * @data: Base address of the data of the local table
+ * @cpus: CPUs represented in this HFI table instance
+ * @hw_table: Pointer to the HFI table of this instance
+ * @update_work: Delayed work to process HFI updates
+ * @table_lock: Lock to protect accesses to the table of this instance
+ * @event_lock: Lock to process HFI interrupts
+ *
+ * A set of parameters to parse and navigate a specific HFI table.
+ */
+struct hfi_instance {
+ union {
+ void *local_table;
+ u64 *timestamp;
+ };
+ void *hdr;
+ void *data;
+ cpumask_var_t cpus;
+ void *hw_table;
+ struct delayed_work update_work;
+ raw_spinlock_t table_lock;
+ raw_spinlock_t event_lock;
+};
+
+/**
+ * struct hfi_features - Supported HFI features
+ * @nr_table_pages: Size of the HFI table in 4KB pages
+ * @cpu_stride: Stride size to locate the capability data of a logical
+ * processor within the table (i.e., row stride)
+ * @hdr_size: Size of the table header
+ *
+ * Parameters and supported features that are common to all HFI instances
+ */
+struct hfi_features {
+ unsigned int nr_table_pages;
+ unsigned int cpu_stride;
+ unsigned int hdr_size;
+};
+
+/**
+ * struct hfi_cpu_info - Per-CPU attributes to consume HFI data
+ * @index: Row of this CPU in its HFI table
+ * @hfi_instance: Attributes of the HFI table to which this CPU belongs
+ *
+ * Parameters to link a logical processor to an HFI table and a row within it.
+ */
+struct hfi_cpu_info {
+ s16 index;
+ struct hfi_instance *hfi_instance;
+};
+
+static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 };
+
+static int max_hfi_instances;
+static struct hfi_instance *hfi_instances;
+
+static struct hfi_features hfi_features;
+static DEFINE_MUTEX(hfi_instance_lock);
+
+static struct workqueue_struct *hfi_updates_wq;
+#define HFI_UPDATE_INTERVAL HZ
+#define HFI_MAX_THERM_NOTIFY_COUNT 16
+
+static void get_hfi_caps(struct hfi_instance *hfi_instance,
+ struct thermal_genl_cpu_caps *cpu_caps)
+{
+ int cpu, i = 0;
+
+ raw_spin_lock_irq(&hfi_instance->table_lock);
+ for_each_cpu(cpu, hfi_instance->cpus) {
+ struct hfi_cpu_data *caps;
+ s16 index;
+
+ index = per_cpu(hfi_cpu_info, cpu).index;
+ caps = hfi_instance->data + index * hfi_features.cpu_stride;
+ cpu_caps[i].cpu = cpu;
+
+ /*
+ * Scale performance and energy efficiency to
+ * the [0, 1023] interval that thermal netlink uses.
+ */
+ cpu_caps[i].performance = caps->perf_cap << 2;
+ cpu_caps[i].efficiency = caps->ee_cap << 2;
+
+ ++i;
+ }
+ raw_spin_unlock_irq(&hfi_instance->table_lock);
+}
+
+/*
+ * Call update_capabilities() when there are changes in the HFI table.
+ */
+static void update_capabilities(struct hfi_instance *hfi_instance)
+{
+ struct thermal_genl_cpu_caps *cpu_caps;
+ int i = 0, cpu_count;
+
+ /* CPUs may come online/offline while processing an HFI update. */
+ mutex_lock(&hfi_instance_lock);
+
+ cpu_count = cpumask_weight(hfi_instance->cpus);
+
+ /* No CPUs to report in this hfi_instance. */
+ if (!cpu_count)
+ goto out;
+
+ cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL);
+ if (!cpu_caps)
+ goto out;
+
+ get_hfi_caps(hfi_instance, cpu_caps);
+
+ if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT)
+ goto last_cmd;
+
+ /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */
+ for (i = 0;
+ (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count;
+ i += HFI_MAX_THERM_NOTIFY_COUNT)
+ thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT,
+ &cpu_caps[i]);
+
+ cpu_count = cpu_count - i;
+
+last_cmd:
+ /* Process the remaining capabilities if any. */
+ if (cpu_count)
+ thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]);
+
+ kfree(cpu_caps);
+out:
+ mutex_unlock(&hfi_instance_lock);
+}
+
+static void hfi_update_work_fn(struct work_struct *work)
+{
+ struct hfi_instance *hfi_instance;
+
+ hfi_instance = container_of(to_delayed_work(work), struct hfi_instance,
+ update_work);
+ if (!hfi_instance)
+ return;
+
+ update_capabilities(hfi_instance);
+}
+
+void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
+{
+ struct hfi_instance *hfi_instance;
+ int cpu = smp_processor_id();
+ struct hfi_cpu_info *info;
+ u64 new_timestamp;
+
+ if (!pkg_therm_status_msr_val)
+ return;
+
+ info = &per_cpu(hfi_cpu_info, cpu);
+ if (!info)
+ return;
+
+ /*
+ * A CPU is linked to its HFI instance before the thermal vector in the
+ * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL
+ * when receiving an HFI event.
+ */
+ hfi_instance = info->hfi_instance;
+ if (unlikely(!hfi_instance)) {
+ pr_debug("Received event on CPU %d but instance was null", cpu);
+ return;
+ }
+
+ /*
+ * On most systems, all CPUs in the package receive a package-level
+ * thermal interrupt when there is an HFI update. It is sufficient to
+ * let a single CPU to acknowledge the update and queue work to
+ * process it. The remaining CPUs can resume their work.
+ */
+ if (!raw_spin_trylock(&hfi_instance->event_lock))
+ return;
+
+ /* Skip duplicated updates. */
+ new_timestamp = *(u64 *)hfi_instance->hw_table;
+ if (*hfi_instance->timestamp == new_timestamp) {
+ raw_spin_unlock(&hfi_instance->event_lock);
+ return;
+ }
+
+ raw_spin_lock(&hfi_instance->table_lock);
+
+ /*
+ * Copy the updated table into our local copy. This includes the new
+ * timestamp.
+ */
+ memcpy(hfi_instance->local_table, hfi_instance->hw_table,
+ hfi_features.nr_table_pages << PAGE_SHIFT);
+
+ raw_spin_unlock(&hfi_instance->table_lock);
+ raw_spin_unlock(&hfi_instance->event_lock);
+
+ /*
+ * Let hardware know that we are done reading the HFI table and it is
+ * free to update it again.
+ */
+ pkg_therm_status_msr_val &= THERM_STATUS_CLEAR_PKG_MASK &
+ ~PACKAGE_THERM_STATUS_HFI_UPDATED;
+ wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, pkg_therm_status_msr_val);
+
+ queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work,
+ HFI_UPDATE_INTERVAL);
+}
+
+static void init_hfi_cpu_index(struct hfi_cpu_info *info)
+{
+ union cpuid6_edx edx;
+
+ /* Do not re-read @cpu's index if it has already been initialized. */
+ if (info->index > -1)
+ return;
+
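+ /*
+ * The HFI CPUID leaf reports the index of this CPU's row within
+ * its HFI table.
+ */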
+ edx.full = cpuid_edx(CPUID_HFI_LEAF);
+ info->index = edx.split.index;
+}
+
+/*
+ * The format of the HFI table depends on the number of capabilities that the
+ * hardware supports. Keep a data structure to navigate the table.
+ */
+static void init_hfi_instance(struct hfi_instance *hfi_instance)
+{
+ /* The HFI header is below the timestamp. */
+ hfi_instance->hdr = hfi_instance->local_table +
+ sizeof(*hfi_instance->timestamp);
+
+ /* The HFI data starts below the header. */
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+}
+
+/**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU on which the HFI will be enabled
+ *
+ * Enable the HFI to be used on @cpu. The HFI is enabled at the die/package
+ * level. The first CPU in the die/package to come online does the full HFI
+ * initialization. Subsequent CPUs will just link themselves to the HFI
+ * instance of their die/package.
+ *
+ * This function is called before enabling the thermal vector in the local APIC
+ * in order to ensure that @cpu has an associated HFI instance when it receives
+ * an HFI event.
+ */
+void intel_hfi_online(unsigned int cpu)
+{
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+ phys_addr_t hw_table_pa;
+ u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+ if (!hfi_instances)
+ return;
+
+ /*
+ * Link @cpu to the HFI instance of its package/die. It does not
+ * matter whether the instance has been initialized.
+ */
+ info = &per_cpu(hfi_cpu_info, cpu);
+ die_id = topology_logical_die_id(cpu);
+ hfi_instance = info->hfi_instance;
+ if (!hfi_instance) {
+ if (die_id < 0 || die_id >= max_hfi_instances)
+ return;
+
+ hfi_instance = &hfi_instances[die_id];
+ info->hfi_instance = hfi_instance;
+ }
+
+ init_hfi_cpu_index(info);
+
+ /*
+ * Now check whether the HFI instance of the package/die of @cpu has been
+ * initialized (by checking its header). If it has, all we have to do is
+ * add @cpu to this instance's cpumask.
+ */
+ mutex_lock(&hfi_instance_lock);
+ if (hfi_instance->hdr) {
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+ goto unlock;
+ }
+
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+ */
+ hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+ /*
+ * Program the address of the feedback table of this die/package. On
+ * some processors, hardware remembers the old address of the HFI table
+ * even after having been reprogrammed and re-enabled. Thus, do not free
+ * the pages allocated for the table or reprogram the hardware with a
+ * new base address. Namely, program the hardware only once.
+ */
+ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+ raw_spin_lock_init(&hfi_instance->table_lock);
+ raw_spin_lock_init(&hfi_instance->event_lock);
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+ /*
+ * Enable the hardware feedback interface and never disable it. See
+ * comment on programming the address of the table.
+ */
+ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+
+unlock:
+ mutex_unlock(&hfi_instance_lock);
+ return;
+
+free_hw_table:
+ free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages);
+ goto unlock;
+}
+
+/**
+ * intel_hfi_offline() - Disable HFI on @cpu
+ * @cpu: CPU on which the HFI will be disabled
+ *
+ * Remove @cpu from those covered by its HFI instance.
+ *
+ * On some processors, hardware remembers previous programming settings even
+ * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
+ * die/package of @cpu are offline. See note in intel_hfi_online().
+ */
+void intel_hfi_offline(unsigned int cpu)
+{
+ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
+ struct hfi_instance *hfi_instance;
+
+ /*
+ * Check if @cpu has an associated, initialized HFI instance (i.e., one
+ * with a non-NULL header). Note that HFI instances are only initialized
+ * if X86_FEATURE_HFI is present.
+ */
+ hfi_instance = info->hfi_instance;
+ if (!hfi_instance)
+ return;
+
+ if (!hfi_instance->hdr)
+ return;
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
+ mutex_unlock(&hfi_instance_lock);
+}
+
+static __init int hfi_parse_features(void)
+{
+ unsigned int nr_capabilities;
+ union cpuid6_edx edx;
+
+ if (!boot_cpu_has(X86_FEATURE_HFI))
+ return -ENODEV;
+
+ /*
+ * If we are here we know that CPUID_HFI_LEAF exists. Parse the
+ * supported capabilities and the size of the HFI table.
+ */
+ edx.full = cpuid_edx(CPUID_HFI_LEAF);
+
+ if (!edx.split.capabilities.split.performance) {
+ pr_debug("Performance reporting not supported! Not using HFI\n");
+ return -ENODEV;
+ }
+
+ /*
+ * The number of supported capabilities determines the number of
+ * columns in the HFI table. Exclude the reserved bits.
+ */
+ edx.split.capabilities.split.__reserved = 0;
+ nr_capabilities = hweight8(edx.split.capabilities.bits);
+
+ /* The number of 4KB pages required by the table (CPUID reports pages minus one). */
+ hfi_features.nr_table_pages = edx.split.table_pages + 1;
+
+ /*
+ * The header contains change indications for each supported feature.
+ * The size of the table header is rounded up to be a multiple of 8
+ * bytes.
+ */
+ hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8;
+
+ /*
+ * Data of each logical processor is also rounded up to be a multiple
+ * of 8 bytes.
+ */
+ hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8;
+
+ return 0;
+}
+
+void __init intel_hfi_init(void)
+{
+ struct hfi_instance *hfi_instance;
+ int i, j;
+
+ if (hfi_parse_features())
+ return;
+
+ /* There is one HFI instance per die/package. */
+ max_hfi_instances = topology_max_packages() *
+ topology_max_die_per_package();
+
+ /*
+ * This allocation may fail. CPU hotplug callbacks must check
+ * for a null pointer.
+ */
+ hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances),
+ GFP_KERNEL);
+ if (!hfi_instances)
+ return;
+
+ for (i = 0; i < max_hfi_instances; i++) {
+ hfi_instance = &hfi_instances[i];
+ if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL))
+ goto err_nomem;
+ }
+
+ hfi_updates_wq = create_singlethread_workqueue("hfi-updates");
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
+ return;
+
+err_nomem:
+ for (j = 0; j < i; ++j) {
+ hfi_instance = &hfi_instances[j];
+ free_cpumask_var(hfi_instance->cpus);
+ }
+
+ kfree(hfi_instances);
+ hfi_instances = NULL;
+}
diff --git a/drivers/thermal/intel/intel_hfi.h b/drivers/thermal/intel/intel_hfi.h
new file mode 100644
index 000000000000..325aa78b745c
--- /dev/null
+++ b/drivers/thermal/intel/intel_hfi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_HFI_H
+#define _INTEL_HFI_H
+
+#if defined(CONFIG_INTEL_HFI_THERMAL)
+void __init intel_hfi_init(void);
+void intel_hfi_online(unsigned int cpu);
+void intel_hfi_offline(unsigned int cpu);
+void intel_hfi_process_event(__u64 pkg_therm_status_msr_val);
+#else
+static inline void intel_hfi_init(void) { }
+static inline void intel_hfi_online(unsigned int cpu) { }
+static inline void intel_hfi_offline(unsigned int cpu) { }
+static inline void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) { }
+#endif /* CONFIG_INTEL_HFI_THERMAL */
+
+#endif /* _INTEL_HFI_H */
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 14256421d98c..c841ab37e7c6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -556,12 +556,9 @@ static void end_power_clamp(void)
* stop faster.
*/
clamping = false;
- if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
- for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
- pr_debug("clamping worker for cpu %d alive, destroy\n",
- i);
- stop_power_clamp_worker(i);
- }
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping worker for cpu %d alive, destroy\n", i);
+ stop_power_clamp_worker(i);
}
}
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index dab7e8fb1059..8352083b87c7 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -32,6 +32,7 @@
#include <asm/irq.h>
#include <asm/msr.h>
+#include "intel_hfi.h"
#include "thermal_interrupt.h"
/* How long to wait between reporting thermal events */
@@ -475,6 +476,13 @@ static int thermal_throttle_online(unsigned int cpu)
INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
+ /*
+ * The first CPU coming online will enable the HFI. Usually this causes
+ * hardware to issue an HFI thermal interrupt. That interrupt will reach
+ * the CPU once we enable the thermal vector in the local APIC.
+ */
+ intel_hfi_online(cpu);
+
/* Unmask the thermal vector after the above workqueues are initialized. */
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -492,6 +500,8 @@ static int thermal_throttle_offline(unsigned int cpu)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+ intel_hfi_offline(cpu);
+
cancel_delayed_work_sync(&state->package_throttle.therm_work);
cancel_delayed_work_sync(&state->core_throttle.therm_work);
@@ -509,6 +519,8 @@ static __init int thermal_throttle_init_device(void)
if (!atomic_read(&therm_throt_en))
return 0;
+ intel_hfi_init();
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
thermal_throttle_online,
thermal_throttle_offline);
@@ -608,6 +620,10 @@ void intel_thermal_interrupt(void)
PACKAGE_THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
PACKAGE_LEVEL);
+
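+ /*
+ * Pass only the HFI-updated status bit; intel_hfi_process_event()
+ * does nothing when it is clear.
+ */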
+ if (this_cpu_has(X86_FEATURE_HFI))
+ intel_hfi_process_event(msr_val &
+ PACKAGE_THERM_STATUS_HFI_UPDATED);
}
}
@@ -717,6 +733,12 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
l | (PACKAGE_THERM_INT_LOW_ENABLE
| PACKAGE_THERM_INT_HIGH_ENABLE), h);
+
+ if (cpu_has(c, X86_FEATURE_HFI)) {
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l | PACKAGE_THERM_INT_HFI_ENABLE, h);
+ }
}
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index eafa7526eb8b..c7f91cbdccc7 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -28,6 +28,8 @@
#define LMH_REG_DCVS_INTR_CLR 0x8
+#define LMH_ENABLE_ALGOS 1
+
struct lmh_hw_data {
void __iomem *base;
struct irq_domain *domain;
@@ -90,6 +92,7 @@ static int lmh_probe(struct platform_device *pdev)
struct device_node *cpu_node;
struct lmh_hw_data *lmh_data;
int temp_low, temp_high, temp_arm, cpu_id, ret;
+ unsigned int enable_alg;
u32 node_id;
lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
@@ -141,32 +144,36 @@ static int lmh_probe(struct platform_device *pdev)
if (!qcom_scm_lmh_dcvsh_available())
return -EINVAL;
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling current subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret) {
- dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
- return ret;
- }
-
- ret = qcom_scm_lmh_profile_change(0x1);
- if (ret) {
- dev_err(dev, "Error %d changing profile\n", ret);
- return ret;
+ enable_alg = (uintptr_t)of_device_get_match_data(dev);
+
+ if (enable_alg) {
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling current subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_profile_change(0x1);
+ if (ret) {
+ dev_err(dev, "Error %d changing profile\n", ret);
+ return ret;
+ }
}
/* Set default thermal trips */
@@ -213,7 +220,8 @@ static int lmh_probe(struct platform_device *pdev)
}
static const struct of_device_id lmh_table[] = {
- { .compatible = "qcom,sdm845-lmh", },
+ { .compatible = "qcom,sdm845-lmh", .data = (void *)LMH_ENABLE_ALGOS},
+ { .compatible = "qcom,sm8150-lmh", },
{}
};
MODULE_DEVICE_TABLE(of, lmh_table);
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 99a8d9f3e03c..154d3cb19c88 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/thermal.h>
+#include "../thermal_hwmon.h"
#include "tsens.h"
/**
@@ -1060,6 +1061,10 @@ static int tsens_register(struct tsens_priv *priv)
priv->sensor[i].tzd = tzd;
if (priv->ops->enable)
priv->ops->enable(priv, i);
+
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(priv->dev,
+ "Failed to add hwmon sysfs attributes\n");
}
/* VER_0 require to set MIN and MAX THRESH
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index 94f1da1dcd69..5affc3d196be 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -52,6 +52,8 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
if (err)
return err;
+ if (msg.rx.ret)
+ return -EINVAL;
*out_temp = reply.get_temp.temp;
@@ -63,6 +65,7 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
struct tegra_bpmp_thermal_zone *zone = data;
struct mrq_thermal_host_to_bpmp_request req;
struct tegra_bpmp_message msg;
+ int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_SET_TRIP;
@@ -76,7 +79,13 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
msg.tx.data = &req;
msg.tx.size = sizeof(req);
- return tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static void tz_device_update_work_fn(struct work_struct *work)
@@ -140,6 +149,8 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
+ if (msg.rx.ret)
+ return -EINVAL;
*num_zones = reply.get_num_zones.num;
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index a16dd4d5d710..32fea5174cc0 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -43,6 +43,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] =
[THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 },
[THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING,
.len = THERMAL_NAME_LENGTH },
+ /* CPU capabilities */
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 },
};
struct param {
@@ -58,6 +63,8 @@ struct param {
int temp;
int cdev_state;
int cdev_max_state;
+ struct thermal_genl_cpu_caps *cpu_capabilities;
+ int cpu_capabilities_count;
};
typedef int (*cb_t)(struct param *);
@@ -190,6 +197,42 @@ static int thermal_genl_event_gov_change(struct param *p)
return 0;
}
+static int thermal_genl_event_cpu_capability_change(struct param *p)
+{
+ struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities;
+ struct sk_buff *msg = p->msg;
+ struct nlattr *start_cap;
+ int i;
+
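+ /*
+ * Pack one (id, performance, efficiency) triplet per CPU under a
+ * single nested attribute.
+ */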
+ start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY);
+ if (!start_cap)
+ return -EMSGSIZE;
+
+ for (i = 0; i < p->cpu_capabilities_count; ++i) {
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
+ cpu_cap->cpu))
+ goto out_cancel_nest;
+
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
+ cpu_cap->performance))
+ goto out_cancel_nest;
+
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
+ cpu_cap->efficiency))
+ goto out_cancel_nest;
+
+ ++cpu_cap;
+ }
+
+ nla_nest_end(msg, start_cap);
+
+ return 0;
+out_cancel_nest:
+ nla_nest_cancel(msg, start_cap);
+
+ return -EMSGSIZE;
+}
+
int thermal_genl_event_tz_delete(struct param *p)
__attribute__((alias("thermal_genl_event_tz")));
@@ -219,6 +262,7 @@ static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete,
[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update,
[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change,
+ [THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change,
};
/*
@@ -356,6 +400,15 @@ int thermal_notify_tz_gov_change(int tz_id, const char *name)
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p);
}
+int thermal_genl_cpu_capability_event(int count,
+ struct thermal_genl_cpu_caps *caps)
+{
+ struct param p = { .cpu_capabilities_count = count, .cpu_capabilities = caps };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p);
+}
+EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event);
+
/*************************** Command encoding ********************************/
static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz,
@@ -419,11 +472,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
for (i = 0; i < tz->trips; i++) {
enum thermal_trip_type type;
- int temp, hyst;
+ int temp, hyst = 0;
tz->ops->get_trip_type(tz, i, &type);
tz->ops->get_trip_temp(tz, i, &temp);
- tz->ops->get_trip_hyst(tz, i, &hyst);
+ if (tz->ops->get_trip_hyst)
+ tz->ops->get_trip_hyst(tz, i, &hyst);
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index e554f76291f4..1052f523188d 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -4,6 +4,12 @@
* Author: Daniel Lezcano <daniel.lezcano@linaro.org>
*/
+struct thermal_genl_cpu_caps {
+ int cpu;
+ int performance;
+ int efficiency;
+};
+
/* Netlink notification function */
#ifdef CONFIG_THERMAL_NETLINK
int __init thermal_netlink_init(void);
@@ -23,6 +29,8 @@ int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state);
int thermal_notify_cdev_delete(int cdev_id);
int thermal_notify_tz_gov_change(int tz_id, const char *name);
int thermal_genl_sampling_temp(int id, int temp);
+int thermal_genl_cpu_capability_event(int count,
+ struct thermal_genl_cpu_caps *caps);
#else
static inline int thermal_netlink_init(void)
{
@@ -101,4 +109,10 @@ static inline int thermal_genl_sampling_temp(int id, int temp)
{
return 0;
}
+
+static inline int thermal_genl_cpu_capability_event(int count, struct thermal_genl_cpu_caps *caps)
+{
+ return 0;
+}
+
#endif /* CONFIG_THERMAL_NETLINK */
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index f84375865c97..703039d8b937 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -21,6 +21,7 @@
#include "ti-thermal.h"
#include "ti-bandgap.h"
+#include "../thermal_hwmon.h"
/* common data structures */
struct ti_thermal_data {
@@ -106,14 +107,6 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- int *temp)
-{
- struct ti_thermal_data *data = thermal->devdata;
-
- return __ti_thermal_get_temp(data, temp);
-}
-
static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
{
struct ti_thermal_data *data = p;
@@ -189,6 +182,9 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
ti_bandgap_set_sensor_data(bgp, id, data);
ti_bandgap_write_update_interval(bgp, data->sensor_id, interval);
+ if (devm_thermal_add_hwmon_sysfs(data->ti_thermal))
+ dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n");
+
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index ba27b274c967..fa92f727fdf8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -322,6 +322,7 @@ static int addr_cnt;
#define GSM1_ESCAPE_BITS 0x20
#define XON 0x11
#define XOFF 0x13
+#define ISO_IEC_646_MASK 0x7F
static const struct tty_port_operations gsm_port_ops;
@@ -438,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
modembits |= MDM_RTR;
if (dlci->modem_tx & TIOCM_RI)
modembits |= MDM_IC;
- if (dlci->modem_tx & TIOCM_CD)
+ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
return modembits;
}
@@ -447,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
- * @cr: C/R bit from the frame
+ * @cr: C/R bit seen as initiator
* @control: control including PF bit
* @data: following data bytes
* @dlen: length of data
@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
int olen = 0;
while (len--) {
if (*input == GSM1_SOF || *input == GSM1_ESCAPE
- || *input == XON || *input == XOFF) {
+ || (*input & ISO_IEC_646_MASK) == XON
+ || (*input & ISO_IEC_646_MASK) == XOFF) {
*output++ = GSM1_ESCAPE;
*output++ = *input++ ^ GSM1_ESCAPE_BITS;
olen++;
@@ -546,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* gsm_send - send a control frame
* @gsm: our GSM mux
* @addr: address for control frame
- * @cr: command/response bit
+ * @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
* Format up and transmit a control frame. These do not go via the
@@ -561,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
int len;
u8 cbuf[10];
u8 ibuf[3];
+ int ocr;
+
+ /* toggle C/R coding if not initiator */
+ ocr = cr ^ (gsm->initiator ? 0 : 1);
switch (gsm->encoding) {
case 0:
cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[1] = (addr << 2) | (ocr << 1) | EA;
cbuf[2] = control;
cbuf[3] = EA; /* Length of data = 0 */
cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
@@ -575,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
case 1:
case 2:
/* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[0] = (addr << 2) | (ocr << 1) | EA;
ibuf[1] = control;
ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
/* Stuffing may double the size worst case */
@@ -609,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
{
- gsm_send(gsm, addr, 1, control);
+ gsm_send(gsm, addr, 0, control);
}
/**
@@ -1015,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
* @tty: virtual tty bound to the DLCI
* @dlci: DLCI to affect
* @modem: modem bits (full EA)
- * @clen: command length
+ * @slen: number of signal octets
*
* Used when a modem control message or line state inline in adaption
* layer 2 is processed. Sort out the local modem state and throttles
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
- u32 modem, int clen)
+ u32 modem, int slen)
{
int mlines = 0;
u8 brk = 0;
int fc;
- /* The modem status command can either contain one octet (v.24 signals)
- or two octets (v.24 signals + break signals). The length field will
- either be 2 or 3 respectively. This is specified in section
- 5.4.6.3.7 of the 27.010 mux spec. */
+ /* The modem status command can either contain one octet (V.24 signals)
+ * or two octets (V.24 signals + break signals). This is specified in
+ * section 5.4.6.3.7 of the 07.10 mux spec.
+ */
- if (clen == 2)
+ if (slen == 1)
modem = modem & 0x7f;
else {
brk = modem & 0x7f;
@@ -1090,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
+ int slen;
const u8 *dp = data;
struct tty_struct *tty;
@@ -1109,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
dlci = gsm->dlci[addr];
+ slen = len;
while (gsm_read_ea(&modem, *dp++) == 0) {
len--;
if (len == 0)
@@ -1125,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
modem |= (brk & 0x7f);
}
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1449,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
kfifo_reset(&dlci->fifo);
+ /* Ensure that gsmtty_open() can return. */
+ tty_port_set_initialized(&dlci->port, 0);
+ wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
/* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
@@ -1512,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
- gsm_dlci_close(dlci);
+ gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
@@ -1591,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
struct tty_struct *tty;
unsigned int modem = 0;
int len = clen;
+ int slen = 0;
if (debug & 16)
pr_debug("%d bytes for tty\n", len);
@@ -1603,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
case 2: /* Asynchronous serial with line state in each frame */
while (gsm_read_ea(&modem, *data++) == 0) {
len--;
+ slen++;
if (len == 0)
return;
}
+ slen++;
tty = tty_port_tty_get(port);
if (tty) {
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
tty_kref_put(tty);
}
fallthrough;
@@ -1746,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- tty_hangup(tty);
+ /* We cannot use tty_hangup() because in tty_kref_put() the tty
+ * driver assumes that the hangup queue is free and reuses it to
+ * queue release_one_tty(), which leads to a NULL pointer panic in
+ * process_one_work().
+ */
+ tty_vhangup(tty);
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
@@ -1798,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm)
goto invalid;
cr = gsm->address & 1; /* C/R bit */
+ cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
- cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
dlci = gsm->dlci[address];
switch (gsm->control) {
@@ -3232,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
- /* Send an MSC with DTR cleared */
+ /* Send an MSC with RTS cleared */
gsmtty_modem_update(dlci, 0);
}
@@ -3244,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx |= TIOCM_DTR;
+ dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
- /* Send an MSC with DTR set */
+ /* Send an MSC with RTS set */
gsmtty_modem_update(dlci, 0);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 8933ef1f83c0..efc72104c840 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1329,7 +1329,7 @@ handle_newline:
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
return;
}
}
@@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
}
@@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
- n = min(*nr + 1, canon_head - ldata->read_tail);
+ n = min(*nr, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
n += N_TTY_BUF_SIZE;
c = n + found;
- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
- c = min(*nr, c);
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- }
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
diff --git a/drivers/tty/rpmsg_tty.c b/drivers/tty/rpmsg_tty.c
index dae2a4e44f38..29db413bbc03 100644
--- a/drivers/tty/rpmsg_tty.c
+++ b/drivers/tty/rpmsg_tty.c
@@ -50,10 +50,17 @@ static int rpmsg_tty_cb(struct rpmsg_device *rpdev, void *data, int len, void *p
static int rpmsg_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct rpmsg_tty_port *cport = idr_find(&tty_idr, tty->index);
+ struct tty_port *port;
tty->driver_data = cport;
- return tty_port_install(&cport->port, driver, tty);
+ port = tty_port_get(&cport->port);
+ return tty_port_install(port, driver, tty);
+}
+
+static void rpmsg_tty_cleanup(struct tty_struct *tty)
+{
+ tty_port_put(tty->port);
}
static int rpmsg_tty_open(struct tty_struct *tty, struct file *filp)
@@ -106,12 +113,19 @@ static unsigned int rpmsg_tty_write_room(struct tty_struct *tty)
return size;
}
+static void rpmsg_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
static const struct tty_operations rpmsg_tty_ops = {
.install = rpmsg_tty_install,
.open = rpmsg_tty_open,
.close = rpmsg_tty_close,
.write = rpmsg_tty_write,
.write_room = rpmsg_tty_write_room,
+ .hangup = rpmsg_tty_hangup,
+ .cleanup = rpmsg_tty_cleanup,
};
static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
@@ -137,8 +151,10 @@ static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
return cport;
}
-static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
+static void rpmsg_tty_destruct_port(struct tty_port *port)
{
+ struct rpmsg_tty_port *cport = container_of(port, struct rpmsg_tty_port, port);
+
mutex_lock(&idr_lock);
idr_remove(&tty_idr, cport->id);
mutex_unlock(&idr_lock);
@@ -146,7 +162,10 @@ static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
kfree(cport);
}
-static const struct tty_port_operations rpmsg_tty_port_ops = { };
+static const struct tty_port_operations rpmsg_tty_port_ops = {
+ .destruct = rpmsg_tty_destruct_port,
+};
+
static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
{
@@ -166,7 +185,8 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
cport->id, dev);
if (IS_ERR(tty_dev)) {
ret = dev_err_probe(dev, PTR_ERR(tty_dev), "Failed to register tty port\n");
- goto err_destroy;
+ tty_port_put(&cport->port);
+ return ret;
}
cport->rpdev = rpdev;
@@ -177,12 +197,6 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
rpdev->src, rpdev->dst, cport->id);
return 0;
-
-err_destroy:
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
-
- return ret;
}
static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
@@ -192,13 +206,11 @@ static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
dev_dbg(&rpdev->dev, "Removing rpmsg tty device %d\n", cport->id);
/* User hang up to release the tty */
- if (tty_port_initialized(&cport->port))
- tty_port_tty_hangup(&cport->port, false);
+ tty_port_tty_hangup(&cport->port, false);
tty_unregister_device(rpmsg_tty_driver, cport->id);
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
+ tty_port_put(&cport->port);
}
static struct rpmsg_device_id rpmsg_driver_tty_id_table[] = {
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 673cda3d011d..948d0a1c6ae8 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index bce28729dd7b..be8626234627 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
+ if (prop >= port->mapsize) {
+ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
+ prop, &port->mapsize);
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
port->mapbase += prop;
+ port->mapsize -= prop;
+ }
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e8b5469e9dfa..e17e97ea86fa 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -4779,8 +4779,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
+ /* Brainboxes Devices */
/*
- * BrainBoxes UC-260
+ * Brainboxes UC-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-260/271/701/756
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D21,
PCI_ANY_ID, PCI_ANY_ID,
@@ -4788,7 +4810,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0E34,
PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-268
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-275/279
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_8_115200 },
+ /*
+ * Brainboxes UC-302
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-320/324
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-357
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-420/431
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 025b055363c3..95ff10f25d58 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -117,7 +117,7 @@ static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *i
uart.port.private_data = pericom;
uart.port.iotype = UPIO_PORT;
uart.port.uartclk = 921600 * 16;
- uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.set_divisor = pericom_do_set_divisor;
for (i = 0; i < nr && i < maxnr; i++) {
unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 2abb3de11a48..3b12bfc1ed67 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2056,7 +2056,10 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
serial8250_rpm_put(up);
}
-static void wait_for_lsr(struct uart_8250_port *up, int bits)
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
{
unsigned int status, tmout = 10000;
@@ -2073,16 +2076,6 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
udelay(1);
touch_nmi_watchdog();
}
-}
-
-/*
- * Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
-{
- unsigned int tmout;
-
- wait_for_lsr(up, bits);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -3333,35 +3326,6 @@ static void serial8250_console_restore(struct uart_8250_port *up)
}
/*
- * Print a string to the serial port using the device FIFO
- *
- * It sends fifosize bytes and then waits for the fifo
- * to get empty.
- */
-static void serial8250_console_fifo_write(struct uart_8250_port *up,
- const char *s, unsigned int count)
-{
- int i;
- const char *end = s + count;
- unsigned int fifosize = up->port.fifosize;
- bool cr_sent = false;
-
- while (s != end) {
- wait_for_lsr(up, UART_LSR_THRE);
-
- for (i = 0; i < fifosize && s != end; ++i) {
- if (*s == '\n' && !cr_sent) {
- serial_out(up, UART_TX, '\r');
- cr_sent = true;
- } else {
- serial_out(up, UART_TX, *s++);
- cr_sent = false;
- }
- }
- }
-}
-
-/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
@@ -3376,7 +3340,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
struct uart_8250_em485 *em485 = up->em485;
struct uart_port *port = &up->port;
unsigned long flags;
- unsigned int ier, use_fifo;
+ unsigned int ier;
int locked = 1;
touch_nmi_watchdog();
@@ -3408,20 +3372,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
mdelay(port->rs485.delay_rts_before_send);
}
- use_fifo = (up->capabilities & UART_CAP_FIFO) &&
- port->fifosize > 1 &&
- (serial_port_in(port, UART_FCR) & UART_FCR_ENABLE_FIFO) &&
- /*
- * After we put a data in the fifo, the controller will send
- * it regardless of the CTS state. Therefore, only use fifo
- * if we don't use control flow.
- */
- !(up->port.flags & UPF_CONS_FLOW);
-
- if (likely(use_fifo))
- serial8250_console_fifo_write(up, s, count);
- else
- uart_console_write(port, s, count, serial8250_console_putchar);
+ uart_console_write(port, s, count, serial8250_console_putchar);
/*
* Finally, wait for transmitter to become empty
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 1f1df46242f9..ba053a68529f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1582,9 +1582,6 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
container_of(port, struct uart_amba_port, port);
unsigned int cr;
- if (port->rs485.flags & SER_RS485_ENABLED)
- mctrl &= ~TIOCM_RTS;
-
cr = pl011_read(uap, REG_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
@@ -1808,14 +1805,8 @@ static int pl011_startup(struct uart_port *port)
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
- } else {
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
cr |= UART011_CR_TXE;
- }
pl011_write(cr, uap, REG_CR);
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 3c92d4e01488..516cff362434 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -805,7 +805,7 @@ static int max3100_probe(struct spi_device *spi)
return 0;
}
-static int max3100_remove(struct spi_device *spi)
+static void max3100_remove(struct spi_device *spi)
{
struct max3100_port *s = spi_get_drvdata(spi);
int i;
@@ -828,13 +828,12 @@ static int max3100_remove(struct spi_device *spi)
for (i = 0; i < MAX_MAX3100; i++)
if (max3100s[i]) {
mutex_unlock(&max3100s_lock);
- return 0;
+ return;
}
pr_debug("removing max3100 driver\n");
uart_unregister_driver(&max3100_uart_driver);
mutex_unlock(&max3100s_lock);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index dde0824b2fa5..3112b4a05448 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1487,10 +1487,9 @@ static int max310x_spi_probe(struct spi_device *spi)
return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
}
-static int max310x_spi_remove(struct spi_device *spi)
+static void max310x_spi_remove(struct spi_device *spi)
{
max310x_remove(&spi->dev);
- return 0;
}
static const struct spi_device_id max310x_id_table[] = {
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 64e7e6c8145f..3a6c68e19c80 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
msleep(port->rs485.delay_rts_before_send);
+ mutex_lock(&s->efr_lock);
sc16is7xx_handle_tx(port);
+ mutex_unlock(&s->efr_lock);
}
static void sc16is7xx_reconf_rs485(struct uart_port *port)
@@ -1440,11 +1443,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
}
-static int sc16is7xx_spi_remove(struct spi_device *spi)
+static void sc16is7xx_spi_remove(struct spi_device *spi)
{
sc16is7xx_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id sc16is7xx_spi_id_table[] = {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index dc40c4155356..0db90be4c3bc 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -144,6 +144,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ set &= ~TIOCM_RTS;
+ clear &= ~TIOCM_RTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
@@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
static void uart_port_dtr_rts(struct uart_port *uport, int raise)
{
- int rs485_on = uport->rs485_config &&
- (uport->rs485.flags & SER_RS485_ENABLED);
- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
-
- if (raise) {
- if (rs485_on && RTS_after_send) {
- uart_set_mctrl(uport, TIOCM_DTR);
- uart_clear_mctrl(uport, TIOCM_RTS);
- } else {
- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- }
- } else {
- unsigned int clear = TIOCM_DTR;
-
- clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0;
- uart_clear_mctrl(uport, clear);
- }
+ if (raise)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -1075,11 +1067,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
goto out;
if (!tty_io_error(tty)) {
- if (uport->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -2390,6 +2377,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
+ port->mctrl |= TIOCM_RTS;
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 1f89ab0e49ac..9570002d07e7 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -550,11 +550,23 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
+ u32 isr;
+ int ret;
if (port->x_char) {
if (stm32_usart_tx_dma_started(stm32_port) &&
stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ /* Check that TDR is empty before filling FIFO */
+ ret =
+ readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TXE),
+ 10, 1000);
+ if (ret)
+ dev_warn(port->dev, "1 character may be erased\n");
+
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
@@ -730,7 +742,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit) && !port->x_char)
return;
if (rs485conf->flags & SER_RS485_ENABLED) {
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 7e8b3bd59c7b..8fec1d8648f5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3088,7 +3088,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
- tty = kzalloc(sizeof(*tty), GFP_KERNEL);
+ tty = kzalloc(sizeof(*tty), GFP_KERNEL_ACCOUNT);
if (!tty)
return NULL;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 3639bb6dc372..58013698635f 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa)
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
return -ENXIO;
- vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1);
vsa.console--;
+ vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(vsa.console);
if (ret) {
@@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty,
return -ENXIO;
arg--;
+ arg = array_index_nospec(arg, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(arg);
console_unlock();
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 55c73b1d8704..d00ff98dffab 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -483,11 +483,11 @@ int cdns_drd_exit(struct cdns *cdns)
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
+ if (cdns->version == CDNS3_CONTROLLER_V0) {
+ if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
return true;
} else {
- if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
+ if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
return true;
}
return false;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 73f419adce61..4bb6d304eb4b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1919,6 +1919,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
struct usbtmc_ctrlrequest request;
u8 *buffer = NULL;
int rv;
+ unsigned int is_in, pipe;
unsigned long res;
res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
@@ -1928,12 +1929,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (request.req.wLength > USBTMC_BUFSIZE)
return -EMSGSIZE;
+ is_in = request.req.bRequestType & USB_DIR_IN;
+
if (request.req.wLength) {
buffer = kmalloc(request.req.wLength, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- if ((request.req.bRequestType & USB_DIR_IN) == 0) {
+ if (!is_in) {
/* Send control data to device */
res = copy_from_user(buffer, request.data,
request.req.wLength);
@@ -1944,8 +1947,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
}
}
+ if (is_in)
+ pipe = usb_rcvctrlpipe(data->usb_dev, 0);
+ else
+ pipe = usb_sndctrlpipe(data->usb_dev, 0);
rv = usb_control_msg(data->usb_dev,
- usb_rcvctrlpipe(data->usb_dev, 0),
+ pipe,
request.req.bRequest,
request.req.bRequestType,
request.req.wValue,
@@ -1957,7 +1964,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
goto exit;
}
- if (rv && (request.req.bRequestType & USB_DIR_IN)) {
+ if (rv && is_in) {
/* Read control data from device */
res = copy_to_user(request.data, buffer, rv);
if (res)
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 4169cf40a03b..5509d3847af4 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
struct ulpi *ulpi = to_ulpi_dev(dev);
const struct ulpi_device_id *id;
- /* Some ULPI devices don't have a vendor id so rely on OF match */
- if (ulpi->id.vendor == 0)
+ /*
+ * Some ULPI devices don't have a vendor id, and some drivers don't
+ * provide an id_table, so rely on OF match in those cases.
+ */
+ if (ulpi->id.vendor == 0 || !drv->id_table)
return of_driver_match_device(dev, driver);
for (id = drv->id_table; id->vendor; id++)
@@ -127,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = {
static void ulpi_dev_release(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(to_ulpi_dev(dev));
}
@@ -244,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
ret = ulpi_read_id(ulpi);
- if (ret)
+ if (ret) {
+ of_node_put(ulpi->dev.of_node);
return ret;
+ }
ret = device_register(&ulpi->dev);
- if (ret)
+ if (ret) {
+ put_device(&ulpi->dev);
return ret;
+ }
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
@@ -296,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- of_node_put(ulpi->dev.of_node);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d630cccd2e6e..dd44e37a454a 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -446,7 +446,7 @@ static int suspend_common(struct device *dev, bool do_wakeup)
HCD_WAKEUP_PENDING(hcd->shared_hcd))
return -EBUSY;
retval = hcd->driver->pci_suspend(hcd, do_wakeup);
- suspend_report_result(hcd->driver->pci_suspend, retval);
+ suspend_report_result(dev, hcd->driver->pci_suspend, retval);
/* Check again in case wakeup raced with pci_suspend */
if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) ||
@@ -556,7 +556,7 @@ static int hcd_pci_suspend_noirq(struct device *dev)
dev_dbg(dev, "--> PCI %s\n",
pci_power_name(pci_dev->current_state));
} else {
- suspend_report_result(pci_prepare_to_sleep, retval);
+ suspend_report_result(dev, pci_prepare_to_sleep, retval);
return retval;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3e01dd6e509b..d9712c2602af 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1563,6 +1563,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
atomic_dec(&urb->dev->urbnum);
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
@@ -1665,6 +1672,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index c2bbf97a79be..d5bc36ca5b1f 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -602,11 +602,14 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
- find_and_link_peer(hub, port1);
-
retval = component_add(&port_dev->dev, &connector_ops);
- if (retval)
+ if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
+ device_unregister(&port_dev->dev);
+ return retval;
+ }
+
+ find_and_link_peer(hub, port1);
/*
* Enable runtime pm and hold a reference that hub_configure()
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 30727729a44c..33d62d7e3929 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -715,6 +715,12 @@ void usb_kill_urb(struct urb *urb)
if (!(urb && urb->dev && urb->ep))
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -756,6 +762,12 @@ void usb_poison_urb(struct urb *urb)
if (!urb)
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
if (!urb->dev || !urb->ep)
return;
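The smp_mb__after_atomic() calls added in usb_kill_urb(), usb_poison_urb(), usb_hcd_submit_urb() and __usb_hcd_giveback_urb() form a classic store/load pairing: each side writes its flag, issues a full barrier, then reads the other side's flag, so at least one of the two sides is guaranteed to observe the other's write. A minimal userspace analogue using C11 atomics, offered as a sketch of the pattern rather than the kernel code (the explicit fence mirrors smp_mb__after_atomic() even though the seq_cst read-modify-write already orders these accesses):

/* Compile with: cc -std=c11 -pthread barrier_pair.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int reject;	/* written by the "kill" side      */
static atomic_int use_count;	/* written by the "giveback" side  */

void kill_side(void)
{
	atomic_fetch_add(&reject, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__after_atomic() */
	if (atomic_load_explicit(&use_count, memory_order_relaxed) == 0)
		printf("safe to tear down\n");
}

void giveback_side(void)
{
	atomic_fetch_sub(&use_count, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__after_atomic() */
	if (atomic_load_explicit(&reject, memory_order_relaxed))
		printf("wake the killer\n");
}

int main(void)
{
	atomic_store(&use_count, 1);
	giveback_side();
	kill_side();
	return 0;
}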
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 8a63da3ab39d..88c337bf564f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
@@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
int testmode)
{ return 0; }
#define dwc2_is_device_connected(hsotg) (0)
+#define dwc2_is_device_enabled(hsotg) (0)
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index 1b39c4776369..d8d6493bc457 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
already = dwc2_ovr_bvalid(hsotg, true);
- /* This clear DCTL.SFTDISCON bit */
- dwc2_hsotg_core_connect(hsotg);
+ if (dwc2_is_device_enabled(hsotg)) {
+ /* This clears the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_connect(hsotg);
+ }
} else {
if (dwc2_is_device_mode(hsotg)) {
if (!dwc2_ovr_bvalid(hsotg, false))
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 2bc03f41c70a..eee3504397e6 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -5097,7 +5097,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
- for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7ff8fc8f79a9..06d0e88ec8af 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -43,6 +43,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
static struct gpiod_lookup_table platform_bytcr_gpios = {
.dev_id = "0000:00:16.0",
.table = {
- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
{}
},
};
@@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
{}
};
+static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
@@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
.properties = dwc3_pci_intel_properties,
};
+static const struct software_node dwc3_pci_intel_byt_swnode = {
+ .properties = dwc3_pci_intel_byt_properties,
+};
+
static const struct software_node dwc3_pci_intel_mrfld_swnode = {
.properties = dwc3_pci_mrfld_properties,
};
@@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
- (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
@@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 9cc3ad701a29..a6f3a9b38789 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -99,17 +99,29 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct phy *usb3_phy;
- int ret;
+ int ret = 0;
u32 reg;
- usb3_phy = devm_phy_get(dev, "usb3-phy");
- if (PTR_ERR(usb3_phy) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(usb3_phy)) {
+ ret = PTR_ERR(usb3_phy);
+ dev_err_probe(dev, ret,
+ "failed to get USB3 PHY\n");
goto err;
- } else if (IS_ERR(usb3_phy)) {
- usb3_phy = NULL;
}
+ /*
+ * The following core resets are not required unless a USB3 PHY
+ * is used, and the subsequent register settings are not required
+ * unless a core reset is performed (they should be set properly
+ * by the first-stage boot loader, but may be reverted by a core
+ * reset). They may also break the configuration if USB3 is actually
+ * in use but the usb3-phy entry is missing from the device tree.
+ * Therefore, skip these operations in this case.
+ */
+ if (!usb3_phy)
+ goto skip_usb3_phy;
+
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
if (IS_ERR(crst)) {
ret = PTR_ERR(crst);
@@ -188,6 +200,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
+skip_usb3_phy:
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 520031ba38aa..a0c883f19a41 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1291,6 +1291,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+ /*
+ * As per the data book, section 4.2.3.2 "TRB Control Bit Rules":
+ *
+ * The controller autonomously checks the HWO field of a TRB to determine if the
+ * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+ * is valid before setting the HWO field to '1'. In most systems, this means that
+ * software must update the fourth DWORD of a TRB last.
+ *
+ * However, there is a possibility of CPU reordering here, which can cause
+ * the controller to observe the HWO bit set prematurely.
+ * Add a write memory barrier to prevent CPU re-ordering.
+ */
+ wmb();
trb->ctrl |= DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_enq(dep);
@@ -4147,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
+ local_bh_disable();
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
+ local_bh_enable();
return ret;
}
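The wmb() added in __dwc3_prepare_one_trb() implements the usual descriptor-publication rule: fill every other field of the DMA descriptor first, then make the ownership bit visible last. A hedged userspace sketch of the same idea using a release store on the control word (my_trb and the HWO bit here are illustrative names, not the dwc3 TRB layout):

#include <stdatomic.h>
#include <stdint.h>

struct my_trb {
	uint32_t bpl;
	uint32_t bph;
	uint32_t size;
	_Atomic uint32_t ctrl;		/* bit 0 = HWO (hardware owns the TRB) */
};

#define MY_TRB_CTRL_HWO	(1u << 0)

void publish_trb(struct my_trb *trb, uint64_t dma, uint32_t len, uint32_t flags)
{
	trb->bpl  = (uint32_t)dma;
	trb->bph  = (uint32_t)(dma >> 32);
	trb->size = len;
	/*
	 * Release ordering: all writes above become visible before HWO is
	 * set, so the consumer never sees HWO=1 with a half-written TRB.
	 */
	atomic_store_explicit(&trb->ctrl, flags | MY_TRB_CTRL_HWO,
			      memory_order_release);
}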
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 16f9e3423c9f..9315313108c9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1988,6 +1988,9 @@ unknown:
if (w_index != 0x5 || (w_value >> 8))
break;
interface = w_value & 0xFF;
+ if (interface >= MAX_CONFIG_INTERFACES ||
+ !os_desc_cfg->interface[interface])
+ break;
buf[6] = w_index;
count = count_ext_prop(os_desc_cfg,
interface);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 25ad1e97a458..1922fd02043c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs)
static void ffs_data_closed(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles,
- ffs->eps_count);
- ffs->epfiles = NULL;
- }
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock,
+ flags);
+
+ if (epfiles)
+ ffs_epfiles_destroy(epfiles,
+ ffs->eps_count);
+
if (ffs->setup_state == FFS_SETUP_PENDING)
__ffs_ep0_stall(ffs);
} else {
@@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
static void ffs_data_clear(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock, flags);
+
+ /*
+ * A potential race is possible between ffs_func_eps_disable()
+ * and ffs_epfile_release(), so keeping a local copy of
+ * epfiles saves us from a use-after-free.
+ */
+ if (epfiles) {
+ ffs_epfiles_destroy(epfiles, ffs->eps_count);
ffs->epfiles = NULL;
}
@@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
static void ffs_func_eps_disable(struct ffs_function *func)
{
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = func->ffs->epfiles;
- unsigned count = func->ffs->eps_count;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ count = func->ffs->eps_count;
+ epfile = func->ffs->epfiles;
+ ep = func->eps;
while (count--) {
/* pending requests get nuked */
if (ep->ep)
@@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func)
static int ffs_func_eps_enable(struct ffs_function *func)
{
- struct ffs_data *ffs = func->ffs;
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = ffs->epfiles;
- unsigned count = ffs->eps_count;
+ struct ffs_data *ffs;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ ffs = func->ffs;
+ ep = func->eps;
+ epfile = ffs->epfiles;
+ count = ffs->eps_count;
while(count--) {
ep->ep->driver_data = ep;
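The f_fs changes above all use the same teardown idiom: snapshot the shared epfiles pointer under eps_lock, clear it, drop the lock, and only then free the snapshot, so concurrent users either see the old (still valid) array or NULL, never a freed pointer. A small pthread-based sketch of that idiom, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct epfile { int dummy; };

struct state {
	pthread_mutex_t lock;
	struct epfile *epfiles;		/* shared, guarded by lock */
	unsigned short count;
};

static void destroy_epfiles(struct epfile *epfiles, unsigned short count)
{
	(void)count;			/* a real destructor would walk the array */
	free(epfiles);
}

void teardown(struct state *st)
{
	struct epfile *epfiles;
	unsigned short count;

	pthread_mutex_lock(&st->lock);
	epfiles = st->epfiles;		/* local snapshot */
	count = st->count;
	st->epfiles = NULL;		/* readers now see NULL */
	pthread_mutex_unlock(&st->lock);

	if (epfiles)
		destroy_epfiles(epfiles, count);	/* free outside the lock */
}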
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 46dd11dcb3a8..7371c6e65b10 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -179,6 +179,7 @@
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
+#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 1abf08e5164a..6803cd60cc6d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -584,6 +584,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
if (is_iso) {
switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
size = ss->isoc_maxpacket *
(ss->isoc_mult + 1) *
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 36fa6ef0581b..097a709549d6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
@@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 431d5a7d737e..713efd9aefde 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -637,14 +637,18 @@ static int rndis_set_response(struct rndis_params *params,
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
+ BufLength = le32_to_cpu(buf->InformationBufferLength);
+ BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+ if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+ return -EINVAL;
+
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
- BufLength = le32_to_cpu(buf->InformationBufferLength);
- BufOffset = le32_to_cpu(buf->InformationBufferOffset);
-
#ifdef VERBOSE_DEBUG
pr_debug("%s: Length: %d\n", __func__, BufLength);
pr_debug("%s: Offset: %d\n", __func__, BufOffset);
@@ -919,6 +923,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->resp_avail = resp_avail;
params->v = v;
INIT_LIST_HEAD(&params->resp_queue);
+ spin_lock_init(&params->resp_lock);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
@@ -1012,12 +1017,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
{
rndis_resp_t *r, *n;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
}
+ spin_unlock(&params->resp_lock);
}
EXPORT_SYMBOL_GPL(rndis_free_response);
@@ -1027,14 +1034,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
if (!length) return NULL;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
+ spin_unlock(&params->resp_lock);
return r->buf;
}
}
+ spin_unlock(&params->resp_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rndis_get_next_response);
@@ -1051,7 +1061,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
r->length = length;
r->send = 0;
+ spin_lock(&params->resp_lock);
list_add_tail(&r->list, &params->resp_queue);
+ spin_unlock(&params->resp_lock);
return r;
}
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index f6167f7fea82..6206b8b7490f 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -174,6 +174,7 @@ typedef struct rndis_params {
void (*resp_avail)(void *v);
void *v;
struct list_head resp_queue;
+ spinlock_t resp_lock;
} rndis_params;
/* RNDIS Message parser and other useless functions */
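The new resp_lock serializes every producer and consumer of the RNDIS response queue, so list_add_tail() can no longer race with the list walks that delete entries. A kernel-context sketch of the locking shape (hypothetical struct names; it mirrors, rather than reproduces, the rndis code):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct resp {
	struct list_head list;
	u8 *buf;
};

struct params {
	spinlock_t resp_lock;
	struct list_head resp_queue;
};

void params_init(struct params *p)
{
	spin_lock_init(&p->resp_lock);
	INIT_LIST_HEAD(&p->resp_queue);
}

void add_response(struct params *p, struct resp *r)
{
	spin_lock(&p->resp_lock);
	list_add_tail(&r->list, &p->resp_queue);
	spin_unlock(&p->resp_lock);
}

void free_response(struct params *p, u8 *buf)
{
	struct resp *r, *n;

	spin_lock(&p->resp_lock);
	list_for_each_entry_safe(r, n, &p->resp_queue, list) {
		if (r->buf == buf) {
			list_del(&r->list);
			kfree(r);
		}
	}
	spin_unlock(&p->resp_lock);
}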
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index c5a2c734234a..d86c3a36441e 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
ret = -EBUSY;
goto out_unlock;
}
- if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+ if (in != usb_endpoint_dir_in(ep->ep->desc)) {
dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
ret = -EINVAL;
goto out_unlock;
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index dd0819df096e..9040a0561466 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1895,7 +1895,7 @@ static int at91udc_probe(struct platform_device *pdev)
at91_vbus_irq, 0, driver_name, udc);
if (retval) {
DBG("request vbus irq %d failed\n",
- udc->board.vbus_pin);
+ desc_to_gpio(udc->board.vbus_pin));
goto err_unprepare_iclk;
}
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 568534a0d17c..c109b069f511 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1436,7 +1436,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
usb_gadget_udc_stop(udc);
udc->driver = NULL;
- udc->dev.driver = NULL;
udc->gadget->dev.driver = NULL;
}
@@ -1498,7 +1497,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
driver->function);
udc->driver = driver;
- udc->dev.driver = &driver->driver;
udc->gadget->dev.driver = &driver->driver;
usb_gadget_udc_set_speed(udc, driver->max_speed);
@@ -1521,7 +1519,6 @@ err1:
dev_err(&udc->dev, "failed to start %s: %d\n",
udc->driver->function, ret);
udc->driver = NULL;
- udc->dev.driver = NULL;
udc->gadget->dev.driver = NULL;
return ret;
}
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index d2a2b20cc1ad..7d9bd16190c0 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1292,7 +1292,7 @@ del_gadget:
return err;
}
-static int max3420_remove(struct spi_device *spi)
+static void max3420_remove(struct spi_device *spi)
{
struct max3420_udc *udc = spi_get_drvdata(spi);
unsigned long flags;
@@ -1304,8 +1304,6 @@ static int max3420_remove(struct spi_device *spi)
kthread_stop(udc->thread_task);
spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
}
static const struct of_device_id max3420_udc_of_match[] = {
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 57d417a7c3e0..601829a6b4ba 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev,
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
+ if (cur_role == USB_ROLE_HOST)
+ device_release_driver(host);
if (usb3->driver)
usb3_disconnect(usb3);
usb3_vbus_out(usb3, false);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6ce886fb7bfe..2907fad04e2c 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
break;
case USB_RECIP_ENDPOINT:
epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum >= XUSB_MAX_ENDPOINTS)
+ goto stall;
target_ep = &udc->ep[epnum];
epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
case USB_RECIP_ENDPOINT:
if (!udc->setup.wValue) {
endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (endpoint >= XUSB_MAX_ENDPOINTS) {
+ xudc_ep0_stall(udc);
+ return;
+ }
target_ep = &udc->ep[endpoint];
outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
outinbit = outinbit >> 7;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 30de85a707fe..99a5523a79fb 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1926,7 +1926,7 @@ error:
return retval;
}
-static int
+static void
max3421_remove(struct spi_device *spi)
{
struct max3421_hcd *max3421_hcd;
@@ -1947,7 +1947,6 @@ max3421_remove(struct spi_device *spi)
free_irq(spi->irq, hcd);
usb_put_hcd(hcd);
- return 0;
}
static const struct of_device_id max3421_of_match_table[] = {
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index be09fd9bac58..19b8c7ed74cb 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
return 0;
}
-static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
{
+ struct usb_shadow *shadow = info->shadow + id;
int nr_segs = 0;
int i;
@@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow)
if (xenusb_pipeisoc(shadow->req.pipe))
nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
- for (i = 0; i < nr_segs; i++)
- gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+ for (i = 0; i < nr_segs; i++) {
+ if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
+ xenhcd_set_error(info, "backend didn't release grant");
+ }
shadow->req.nr_buffer_segs = 0;
shadow->req.u.isoc.nr_frame_desc_segs = 0;
@@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
req_id = urbp->req_id;
if (!urbp->unlinked) {
- xenhcd_gnttab_done(&info->shadow[req_id]);
+ xenhcd_gnttab_done(info, req_id);
+ if (info->error)
+ return;
if (urbp->urb->status == -EINPROGRESS)
/* not dequeued */
xenhcd_giveback_urb(info, urbp->urb,
@@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
rp = info->urb_ring.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
xenhcd_set_error(info, "Illegal index on urb-ring");
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
+ goto err;
}
rmb(); /* ensure we see queued responses up to "rp" */
@@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
id = res.id;
if (id >= XENUSB_URB_RING_SIZE) {
xenhcd_set_error(info, "Illegal data on urb-ring");
- continue;
+ goto err;
}
if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
- xenhcd_gnttab_done(&info->shadow[id]);
+ xenhcd_gnttab_done(info, id);
+ if (info->error)
+ goto err;
urb = info->shadow[id].urb;
if (likely(urb)) {
urb->actual_length = res.actual_length;
@@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
spin_unlock_irqrestore(&info->lock, flags);
return more_to_do;
+
+ err:
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
}
static int xenhcd_conn_notify(struct xenhcd_info *info)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c1edcc9b13ce..dc570ce4e831 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dc357cabb265..2d378543bc3a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
+ bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
- hibernated = true;
- if (!hibernated) {
+ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ reinit_xhc = true;
+
+ if (!reinit_xhc) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- temp = readl(&xhci->op_regs->status);
}
- /* If restore operation fails, re-initialize the HC during resume */
- if ((temp & STS_SRE) || hibernated) {
+ temp = readl(&xhci->op_regs->status);
+ /* re-initialize the HC on Restore Error, or Host Controller Error */
+ if (temp & (STS_SRE | STS_HCE)) {
+ reinit_xhc = true;
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ }
+
+ if (reinit_xhc) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct urb_priv *urb_priv;
int num_tds;
- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
- true, true, __func__) <= 0)
+ if (!urb)
return -EINVAL;
+ ret = xhci_check_args(hcd, urb->dev, urb->ep,
+ true, true, __func__);
+ if (ret <= 0)
+ return ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
return -EINVAL;
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
- return -EINVAL;
+ return ret ? ret : -EINVAL;
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 507deef1f709..04c4e3fed094 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
+ if (of_property_read_u8(np, "boost-up", &hub->boost_up))
+ hub->boost_up = USB251XB_DEF_BOOST_UP;
+
cproperty_char = of_get_property(np, "manufacturer", NULL);
strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
@@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
* may be as soon as needed.
*/
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
- hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 7d4d0713f4f0..d2b7e613eb34 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -327,7 +327,6 @@ static int omap2430_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
- device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 29f4b87a9e74..2798fca71261 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -81,10 +81,10 @@
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1a86, 0x5512) },
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x2184, 0x0057) },
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x9986, 0x7523) },
{ },
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8a60c0d56863..a27f7efcec6a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port);
static void cp210x_disable_event_mode(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
@@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
+ { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4edebd14ef29..49c08f07c969 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
@@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 755858ca20ba..d1a9564697a4 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1506,6 +1506,9 @@
#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */
+#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */
+#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */
#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42420bfc983c..e7755d9cfc61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5821E 0x81d7
#define DELL_PRODUCT_5821E_ESIM 0x81e0
+#define DELL_PRODUCT_5829E_ESIM 0x81e4
+#define DELL_PRODUCT_5829E 0x81e6
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
+ .driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ .driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
.driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 29191d33c0e3..1a05e3dcfec8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
+/*
+ * Reported by DocMAX <mail@vacharakis.de>
+ * and Thomas Weißschuh <linux@weissschuh.net>
+ */
+UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
+ "VIA Labs, Inc.",
+ "VL817 SATA Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index 07d307418b47..a929e000d0e2 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -56,7 +56,12 @@ int typec_link_ports(struct typec_port *con)
{
struct each_port_arg arg = { .port = con, .match = NULL };
- bus_for_each_dev(&acpi_bus_type, NULL, &arg, typec_port_match);
+ if (!has_acpi_companion(&con->dev))
+ return 0;
+
+ acpi_bus_for_each_dev(typec_port_match, &arg);
+ if (!arg.match)
+ return 0;
/*
* REVISIT: Now each connector can have only a single component master.
@@ -74,5 +79,6 @@ int typec_link_ports(struct typec_port *con)
void typec_unlink_ports(struct typec_port *con)
{
- component_master_del(&con->dev, &typec_aggregate_ops);
+ if (has_acpi_companion(&con->dev))
+ component_master_del(&con->dev, &typec_aggregate_ops);
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 35a1307349a2..e07d26a3cd8e 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -75,9 +75,25 @@ static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ bool vconn_pres;
+ enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
unsigned int reg;
int ret;
+ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
+ if (vconn_pres) {
+ ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (reg & TCPC_TCPC_CTRL_ORIENTATION)
+ polarity = TYPEC_POLARITY_CC2;
+ }
+
switch (cc) {
case TYPEC_CC_RA:
reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
@@ -112,6 +128,16 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
break;
}
+ if (vconn_pres) {
+ if (polarity == TYPEC_POLARITY_CC2) {
+ reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
+ } else {
+ reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ }
+ }
+
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 2be7a77d400e..b2edd45f13c6 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -98,6 +98,7 @@
#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 59d4fa2443f2..5fce795b69c7 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5156,7 +5156,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
case SNK_TRYWAIT_DEBOUNCE:
break;
case SNK_ATTACH_WAIT:
- tcpm_set_state(port, SNK_UNATTACHED, 0);
+ case SNK_DEBOUNCED:
+ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
break;
case SNK_NEGOTIATE_CAPABILITIES:
@@ -5263,6 +5264,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case PR_SWAP_SNK_SRC_SOURCE_ON:
/* Do nothing, vsafe0v is expected during transition */
break;
+ case SNK_ATTACH_WAIT:
+ case SNK_DEBOUNCED:
+ /* Do nothing, still waiting for VSAFE5V to connect */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 6d27a5b5e3ca..7ffcda94d323 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client)
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
trace_tps6598x_status(status);
ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
/*
* This fwnode has a "compatible" property, but is never populated as a
@@ -855,7 +855,8 @@ err_role_put:
usb_role_switch_put(tps->role_sw);
err_fwnode_put:
fwnode_handle_put(fwnode);
-
+err_clear_mask:
+ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
return ret;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index bff96d64dddf..6db7c8ddd51c 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
if (status < 0)
return status;
- if (!data)
+ if (!(data & DEV_INT))
return 0;
status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index f648f1c54a0f..d0f91078600e 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
switch (cmd) {
case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+ /* This mq feature check aligns with pre-existing userspace
+ * implementation.
+ *
+ * Without it, an untrusted driver could fake a multiqueue config
+ * request down to a non-mq device, which may cause the kernel to
+ * panic due to uninitialized resources for the extra vqs. Even with
+ * a well-behaved guest driver, changing the number of vqs on a
+ * non-mq device is not expected to be allowed.
+ */
+ if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
+ break;
+
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
if (read != sizeof(mq))
break;
newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+ newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+ break;
+
if (ndev->cur_num_vqs == 2 * newqps) {
status = VIRTIO_NET_OK;
break;
@@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
return ndev->mvdev.mlx_features;
}
-static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
+ /* Minimum features to expect */
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return -EOPNOTSUPP;
+ /* Double-check the feature combination sent down by the driver.
+ * Fail invalid feature sets where a required dependency is absent.
+ *
+ * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit
+ * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
+ * By rejecting the invalid features sent down by untrusted drivers, we
+ * ensure that the assumptions made by is_index_valid() and
+ * is_ctrl_vq_idx() are not compromised.
+ */
+ if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
+ BIT_ULL(VIRTIO_NET_F_MQ))
+ return -EINVAL;
+
return 0;
}
@@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
print_features(mvdev, features, true);
- err = verify_min_features(mvdev, features);
+ err = verify_driver_features(mvdev, features);
if (err)
return err;
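The dependency check added in verify_driver_features() boils down to one mask comparison: if the masked feature word reduces to VIRTIO_NET_F_MQ alone, CTRL_VQ is missing and the set is invalid. A standalone sketch of that check (bit numbers follow the VIRTIO 1.1 spec; the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define VIRTIO_NET_F_CTRL_VQ	17
#define VIRTIO_NET_F_MQ		22
#define BIT_ULL(n)		(1ULL << (n))

bool mq_feature_combination_valid(uint64_t features)
{
	uint64_t mask = BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ);

	/* Invalid only when the masked bits reduce to MQ alone. */
	return (features & mask) != BIT_ULL(VIRTIO_NET_F_MQ);
}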
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 9846c9de4bfa..1ea525433a5c 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
* If it does happen we assume a legacy guest.
*/
if (!vdev->features_valid)
- vdpa_set_features(vdev, 0, true);
+ vdpa_set_features_unlocked(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 2b1143f11d8f..0a4d93edc4c0 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
- return iova_pfn << shift;
+ return (dma_addr_t)iova_pfn << shift;
}
static void vduse_domain_free_iova(struct iova_domain *iovad,
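The cast added above has to happen before the shift: otherwise the shift is performed in the width of unsigned long, and on a 32-bit build the high bits are lost before the value is widened to dma_addr_t. A standalone illustration using a 32-bit stand-in for the pfn:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iova_pfn = 0x00123456;		/* pretend 32-bit unsigned long */
	unsigned int shift = 12;		/* 4 KiB pages */

	uint64_t wrong = (uint64_t)(iova_pfn << shift);	/* shifted in 32 bits, high bits lost */
	uint64_t right = (uint64_t)iova_pfn << shift;	/* widen first, then shift */

	printf("wrong: 0x%llx\nright: 0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}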
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index a57e381e830b..cce101e6a940 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
{
struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
- vdpa_unregister_device(&vp_vdpa->vdpa);
vp_modern_remove(&vp_vdpa->mdev);
+ vdpa_unregister_device(&vp_vdpa->vdpa);
}
static struct pci_driver vp_vdpa_driver = {
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 670d56c879e5..40b098320b2a 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
if (last < start)
return -EFAULT;
+ /* If the range being mapped is [0, ULONG_MAX], split it into two entries,
+ * otherwise its size would overflow a u64.
+ */
+ if (start == 0 && last == ULONG_MAX) {
+ u64 mid = last / 2;
+
+ vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
+ addr += mid + 1;
+ start = mid + 1;
+ }
+
if (iotlb->limit &&
iotlb->nmaps == iotlb->limit &&
iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
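The split added above avoids a size computed as last - start + 1 wrapping to zero when the range covers the whole address space. A small standalone program showing the wraparound and the two-halves arithmetic (it assumes a 64-bit unsigned long):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0, last = ULONG_MAX;	/* assumes 64-bit unsigned long */
	uint64_t size = last - start + 1;	/* wraps to 0 */
	uint64_t mid = last / 2;

	printf("naive size      : %llu\n", (unsigned long long)size);
	printf("first half size : %llu\n", (unsigned long long)(mid - start + 1));
	printf("second half size: %llu\n", (unsigned long long)(last - (mid + 1) + 1));
	return 0;
}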
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..ec5249e8c32d 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (vdpa_set_features(vdpa, features, false))
+ if (vdpa_set_features(vdpa, features))
return -EINVAL;
return 0;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 59edb5a1ffe2..1768362115c6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1170,6 +1170,13 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
+ if ((msg.type == VHOST_IOTLB_UPDATE ||
+ msg.type == VHOST_IOTLB_INVALIDATE) &&
+ msg.size == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
if (dev->msg_handler)
ret = dev->msg_handler(dev, &msg);
else
@@ -1981,7 +1988,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
return 0;
}
-static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
if (vhost_put_avail_event(vq))
return -EFAULT;
@@ -2527,7 +2534,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
}
} else {
- r = vhost_update_avail_event(vq, vq->avail_idx);
+ r = vhost_update_avail_event(vq);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d6ca1c7ad513..e6c9d41db1de 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -629,16 +629,18 @@ err:
return ret;
}
-static int vhost_vsock_stop(struct vhost_vsock *vsock)
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
size_t i;
- int ret;
+ int ret = 0;
mutex_lock(&vsock->dev.mutex);
- ret = vhost_dev_check_owner(&vsock->dev);
- if (ret)
- goto err;
+ if (check_owner) {
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+ }
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
struct vhost_virtqueue *vq = &vsock->vqs[i];
@@ -751,9 +753,15 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
- vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+ vsock_for_each_connected_socket(&vhost_transport.transport,
+ vhost_vsock_reset_orphans);
- vhost_vsock_stop(vsock);
+ /* Don't check the owner; we are in the release path, so we need to
+ * stop the vsock device in any case.
+ * vhost_vsock_stop() cannot fail in this case, so we don't need to
+ * check the return code.
+ */
+ vhost_vsock_stop(vsock, false);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
@@ -868,7 +876,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (start)
return vhost_vsock_start(vsock);
else
- return vhost_vsock_stop(vsock);
+ return vhost_vsock_stop(vsock, true);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 8a4361e95a11..522dd81110b8 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -506,12 +506,11 @@ static int ams369fg06_probe(struct spi_device *spi)
return 0;
}
-static int ams369fg06_remove(struct spi_device *spi)
+static void ams369fg06_remove(struct spi_device *spi)
{
struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 33f5d80495e6..0a57033ae31d 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -542,7 +542,7 @@ static int corgi_lcd_probe(struct spi_device *spi)
return 0;
}
-static int corgi_lcd_remove(struct spi_device *spi)
+static void corgi_lcd_remove(struct spi_device *spi)
{
struct corgi_lcd *lcd = spi_get_drvdata(spi);
@@ -550,7 +550,6 @@ static int corgi_lcd_remove(struct spi_device *spi)
lcd->bl_dev->props.brightness = 0;
backlight_update_status(lcd->bl_dev);
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
- return 0;
}
static struct spi_driver corgi_lcd_driver = {
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index 328aba9cddad..e7b6bd827986 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -526,10 +526,9 @@ static int ili922x_probe(struct spi_device *spi)
return 0;
}
-static int ili922x_remove(struct spi_device *spi)
+static void ili922x_remove(struct spi_device *spi)
{
ili922x_poweroff(spi);
- return 0;
}
static struct spi_driver ili922x_driver = {
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 46f97d1c3d21..cc763cf15f53 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -223,12 +223,11 @@ static int l4f00242t03_probe(struct spi_device *spi)
return 0;
}
-static int l4f00242t03_remove(struct spi_device *spi)
+static void l4f00242t03_remove(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
- return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index e8b185bb6f5e..1d17c439430e 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -594,7 +594,6 @@ static int lm3630a_remove(struct i2c_client *client)
if (pchip->irq) {
free_irq(pchip->irq, pchip);
- flush_workqueue(pchip->irqthread);
destroy_workqueue(pchip->irqthread);
}
return 0;
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index f949b66dce1b..5c46df8022bf 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -364,12 +364,11 @@ static int lms501kf03_probe(struct spi_device *spi)
return 0;
}
-static int lms501kf03_remove(struct spi_device *spi)
+static void lms501kf03_remove(struct spi_device *spi)
{
struct lms501kf03 *lcd = spi_get_drvdata(spi);
lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 5cbf621e48bd..b6d373af6e3f 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -255,12 +255,11 @@ static int ltv350qv_probe(struct spi_device *spi)
return 0;
}
-static int ltv350qv_remove(struct spi_device *spi)
+static void ltv350qv_remove(struct spi_device *spi)
{
struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 306bcc6ccb92..527210e85795 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1734,6 +1734,7 @@ static const struct of_device_id wled_match_table[] = {
{ .compatible = "qcom,pmi8994-wled", .data = (void *)4 },
{ .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
{ .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm6150l-wled", .data = (void *)5 },
{ .compatible = "qcom,pm8150l-wled", .data = (void *)5 },
{}
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 0de044dcafd5..fc6fbaf85594 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -397,12 +397,11 @@ static int tdo24m_probe(struct spi_device *spi)
return 0;
}
-static int tdo24m_remove(struct spi_device *spi)
+static void tdo24m_remove(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 38765544345b..23d6c6bf0f54 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -232,15 +232,13 @@ err_register:
return ret;
}
-static int tosa_lcd_remove(struct spi_device *spi)
+static void tosa_lcd_remove(struct spi_device *spi)
{
struct tosa_lcd_data *data = spi_get_drvdata(spi);
i2c_unregister_device(data->i2c);
tosa_lcd_tg_off(data);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 3567b45f9ba9..bfc1913e8b55 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -233,11 +233,9 @@ static int vgg2432a4_probe(struct spi_device *spi)
return 0;
}
-static int vgg2432a4_remove(struct spi_device *spi)
+static void vgg2432a4_remove(struct spi_device *spi)
{
ili9320_remove(spi_get_drvdata(spi));
-
- return 0;
}
static void vgg2432a4_shutdown(struct spi_device *spi)
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 840d9813b0bc..fcc46380e7c9 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ bool "Enable legacy fbcon hardware acceleration code"
+ depends on FRAMEBUFFER_CONSOLE
+ default y if PARISC
+ default n
+ help
+ This option enables the fbcon (framebuffer text-based) hardware
+ acceleration for graphics drivers which were written for the fbdev
+ graphics interface.
+
+ On modern, mainstream machines (such as x86-64) or when using a modern
+ Linux distribution, those fbdev drivers are usually not used, so enabling
+ this option has no effect; this is why you want to disable it on such
+ newer machines.
+
+ If you compile this kernel for older machines which still require the
+ fbdev drivers, you may want to say Y.
+
+ If unsure, say N.
+
config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
bool "Map the console to the primary display device"
depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 6ed5e608dd04..93b8d84c34cf 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -829,7 +829,7 @@ config FB_PVR2
You can pass several parameters to the driver at boot time or at
module load time. The parameters look like "video=pvr2:XXX", where
the meaning of XXX can be found at the end of the main source file
- (<file:drivers/video/pvr2fb.c>). Please see the file
+ (<file:drivers/video/fbdev/pvr2fb.c>). Please see the file
<file:Documentation/fb/pvr2fb.rst>.
config FB_OPENCORES
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 84c56f525889..f8ef62542f7f 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -110,7 +110,7 @@ static const struct fb_ops asiliantfb_ops = {
static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div)
{
unsigned pixclock = *ppixclock;
- unsigned Ftarget = 1000000 * (1000000 / pixclock);
+ unsigned Ftarget;
unsigned n;
unsigned best_error = 0xffffffff;
unsigned best_m = 0xffffffff,
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index e3812a8ff55a..52a35b661643 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -76,8 +76,6 @@
#define SWITCH_SND7 0x80
#define SWITCH_NONE 0x00
-#define up(x, r) (((x) + (r) - 1) & ~((r)-1))
-
static int default_par; /* default resolution (0=none) */
@@ -487,8 +485,8 @@ static struct fb_videomode atafb_modedb[] __initdata = {
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
}, {
- /* 1280x960, 29 kHz, 60 Hz (TT high) */
- "tt-high", 57, 640, 960, 31041, 120, 100, 8, 16, 140, 30,
+ /* 1280x960, 72 kHz, 72 Hz (TT high) */
+ "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
},
@@ -1649,12 +1647,12 @@ static int falcon_pan_display(struct fb_var_screeninfo *var,
int bpp = info->var.bits_per_pixel;
if (bpp == 1)
- var->xoffset = up(var->xoffset, 32);
+ var->xoffset = round_up(var->xoffset, 32);
if (bpp != 16)
par->hw.falcon.xoffset = var->xoffset & 15;
else {
par->hw.falcon.xoffset = 0;
- var->xoffset = up(var->xoffset, 2);
+ var->xoffset = round_up(var->xoffset, 2);
}
par->hw.falcon.line_offset = bpp *
(info->var.xres_virtual - info->var.xres) / 16;
@@ -1683,9 +1681,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red,
((blue & 0xfc00) >> 8));
if (regno < 16) {
shifter_tt.color_reg[regno] =
- (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
- (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
- ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
+ ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) |
+ ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) |
+ ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11));
@@ -1971,9 +1969,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red,
green >>= 12;
if (ATARIHW_PRESENT(EXTD_SHIFTER))
shifter_tt.color_reg[regno] =
- (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) |
- (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) |
- ((blue & 0xe) >> 1) | ((blue & 1) << 3);
+ ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) |
+ ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) |
+ ((blue & 0xe) >> 1) | ((blue & 1) << 3);
else
shifter_tt.color_reg[regno] =
((red & 0xe) << 7) |
@@ -2268,7 +2266,7 @@ static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
return -EINVAL;
- var->xoffset = up(var->xoffset, 16);
+ var->xoffset = round_up(var->xoffset, 16);
par->screen_base = screen_base +
(var->yoffset * info->var.xres_virtual + var->xoffset)
* info->var.bits_per_pixel / 8;
@@ -2406,16 +2404,6 @@ static void atafb_set_disp(struct fb_info *info)
atari_stram_to_virt(info->fix.smem_start));
}
-static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- red >>= 8;
- green >>= 8;
- blue >>= 8;
-
- return info->fbops->fb_setcolreg(regno, red, green, blue, transp, info);
-}
-
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
@@ -2726,7 +2714,6 @@ static struct fb_ops atafb_ops = {
.owner = THIS_MODULE,
.fb_check_var = atafb_check_var,
.fb_set_par = atafb_set_par,
- .fb_setcolreg = atafb_setcolreg,
.fb_blank = atafb_blank,
.fb_pan_display = atafb_pan_display,
.fb_fillrect = atafb_fillrect,
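A note on the up() -> round_up() conversion in atafb.c above: the kernel's
round_up() helper performs the same power-of-two rounding as the removed
driver-local up() macro, so the change is behavior-neutral. A minimal
user-space sketch of that rounding, assuming the alignment is a power of two
(the helper name here is hypothetical):

    #include <assert.h>

    /* Round x up to the next multiple of align (align must be 2^n). */
    static unsigned int round_up_pow2(unsigned int x, unsigned int align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            assert(round_up_pow2(5, 32) == 32);  /* xoffset for 1 bpp  */
            assert(round_up_pow2(33, 2) == 34);  /* xoffset for 16 bpp */
            assert(round_up_pow2(64, 16) == 64); /* aligned stays put  */
            return 0;
    }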
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 355b6120dc4f..1fc8de4ecbeb 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&info->modelist);
- if (pdev->dev.of_node) {
- ret = atmel_lcdfb_of_init(sinfo);
- if (ret)
- goto free_info;
- } else {
+ if (!pdev->dev.of_node) {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
}
+ ret = atmel_lcdfb_of_init(sinfo);
+ if (ret)
+ goto free_info;
+
+ ret = -ENODEV;
if (!sinfo->config)
goto free_info;
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index e6a48689c294..6ff16d3132e5 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -952,7 +952,7 @@ static void aty128_timings(struct aty128fb_par *par)
u32 x_mpll_ref_fb_div;
u32 xclk_cntl;
u32 Nx, M;
- unsigned PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 };
+ static const unsigned int PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 };
#endif
if (!par->constants.ref_clk)
@@ -1321,8 +1321,10 @@ static void aty128_set_pll(struct aty128_pll *pll,
{
u32 div3;
- unsigned char post_conv[] = /* register values for post dividers */
- { 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7 };
+ /* register values for post dividers */
+ static const unsigned char post_conv[] = {
+ 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7
+ };
/* select PPLL_DIV_3 */
aty_st_le32(CLOCK_CNTL_INDEX, aty_ld_le32(CLOCK_CNTL_INDEX) | (3 << 8));
@@ -1360,7 +1362,7 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
const struct aty128fb_par *par)
{
const struct aty128_constants c = par->constants;
- unsigned char post_dividers[] = {1,2,4,8,3,6,12};
+ static const unsigned char post_dividers[] = { 1, 2, 4, 8, 3, 6, 12 };
u32 output_freq;
u32 vclk; /* in .01 MHz */
int i = 0;
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 011b07e44e0d..e967536af166 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -22,13 +22,11 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par)
{
- u8 res;
/* write addr byte */
aty_st_8(CLOCK_CNTL_ADDR, (offset << 2) & PLL_ADDR, par);
/* read the register value */
- res = aty_ld_8(CLOCK_CNTL_DATA, par);
- return res;
+ return aty_ld_8(CLOCK_CNTL_DATA, par);
}
static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 9c37e28fb78b..d06d24830080 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -352,10 +352,8 @@ static int aty_var_to_pll_18818(const struct fb_info *info, u32 vclk_per,
post_divider = 1;
if (MHz100 > MAX_FREQ_2595) {
- MHz100 = MAX_FREQ_2595;
return -EINVAL;
} else if (MHz100 < ABS_MIN_FREQ_2595) {
- program_bits = 0; /* MHz100 = 257 */
return -EINVAL;
} else {
while (MHz100 < MIN_FREQ_2595) {
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 37a6512feda0..52f731a61482 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -239,7 +239,7 @@ int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned
u32 value;
fbdev = to_au1100fb_device(fbi);
- palette = fbdev->regs->lcd_pallettebase;
+ palette = fbdev->regs->lcd_palettebase;
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
diff --git a/drivers/video/fbdev/au1100fb.h b/drivers/video/fbdev/au1100fb.h
index e7239bceefd3..79f4048726f1 100644
--- a/drivers/video/fbdev/au1100fb.h
+++ b/drivers/video/fbdev/au1100fb.h
@@ -92,7 +92,7 @@ struct au1100fb_regs
u32 lcd_pwmdiv;
u32 lcd_pwmhi;
u32 reserved[(0x0400-0x002C)/4];
- u32 lcd_pallettebase[256];
+ u32 lcd_palettebase[256];
};
struct au1100fb_device {
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index c00e01a17368..81c315454428 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1233,8 +1233,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
- return dma_mmap_attrs(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
- fbdev->fb_len, 0);
+ return dma_mmap_coherent(fbdev->dev, vma,
+ fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
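On the au1200fb change above: dma_mmap_coherent() is a thin wrapper around
dma_mmap_attrs() with no attribute flags, so swapping the attrs variant out
is behavior-neutral while using the more common API. A minimal sketch of the
pattern, with a hypothetical driver-state struct:

    #include <linux/dma-mapping.h>
    #include <linux/fb.h>

    struct example_par {            /* hypothetical driver state */
            struct device *dev;
            void *cpu_addr;         /* from dma_alloc_coherent() */
            dma_addr_t dma_handle;
            size_t size;
    };

    static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
    {
            struct example_par *par = info->par;

            /* Map the coherent buffer into the caller's VMA. */
            return dma_mmap_coherent(par->dev, vma, par->cpu_addr,
                                     par->dma_handle, par->size);
    }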
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 93802abbbc72..3d47c347b897 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq)
return 0;
}
-static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
+static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var,
struct fb_info *info)
{
long freq;
@@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
unsigned maxclockidx = var->bits_per_pixel >> 3;
/* convert from ps to kHz */
- freq = PICOS2KHZ(var->pixclock);
-
- dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+ freq = PICOS2KHZ(var->pixclock ? : 1);
maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx];
cinfo->multiplexing = 0;
@@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
/* If the frequency is greater than we can support, we might be able
* to use multiplexing for the video mode */
if (freq > maxclock) {
- dev_err(info->device,
- "Frequency greater than maxclock (%ld kHz)\n",
- maxclock);
- return -EINVAL;
+ var->pixclock = KHZ2PICOS(maxclock);
+
+ while ((freq = PICOS2KHZ(var->pixclock)) > maxclock)
+ var->pixclock++;
}
+ dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+
/*
* Additional constraint: 8bpp uses DAC clock doubling to allow maximum
* pixel clock
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 509311471d51..bd59e7b11ed5 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -67,7 +67,9 @@
#define out_8(addr, val) (void)(val)
#define in_le32(addr) 0
#define out_le32(addr, val) (void)(val)
+#ifndef pgprot_cached_wthru
#define pgprot_cached_wthru(prot) (prot)
+#endif
#else
static void invalid_vram_cache(void __force *addr)
{
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 01fae2c96965..f98e8f298bc1 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute,
}
}
+static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_copyarea area;
+
+ area.sx = sx * vc->vc_font.width;
+ area.sy = sy * vc->vc_font.height;
+ area.dx = dx * vc->vc_font.width;
+ area.dy = dy * vc->vc_font.height;
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info)
void fbcon_set_bitops(struct fbcon_ops *ops)
{
+ ops->bmove = bit_bmove;
ops->clear = bit_clear;
ops->putcs = bit_putcs;
ops->clear_margins = bit_clear_margins;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index a591d291b231..d0b0b05e0dff 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -151,15 +151,8 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
-static int fb_deferred_io_set_page_dirty(struct page *page)
-{
- if (!PageDirty(page))
- SetPageDirty(page);
- return 0;
-}
-
static const struct address_space_operations fb_deferred_io_aops = {
- .set_page_dirty = fb_deferred_io_set_page_dirty,
+ .dirty_folio = noop_dirty_folio,
};
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 99ecd9a6d844..2fc1b80a26ad 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
@@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
+static __inline__ void ywrap_up(struct vc_data *vc, int count);
+static __inline__ void ywrap_down(struct vc_data *vc, int count);
+static __inline__ void ypan_up(struct vc_data *vc, int count);
+static __inline__ void ypan_down(struct vc_data *vc, int count);
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
@@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->graphics = 0;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+ !(info->flags & FBINFO_HWACCEL_DISABLED))
+ p->scrollmode = SCROLL_MOVE;
+ else /* default to something safe */
+ p->scrollmode = SCROLL_REDRAW;
+#endif
+
/*
* ++guenther: console.c:vc_allocate() relies on initializing
* vc_{cols,rows}, but we must not set those if we are only
@@ -1211,13 +1229,14 @@ finished:
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
+ * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
* anywhere on this display. These functions convert lineoffsets to
* bitmap offsets and deal with the wrap-around case by splitting blits.
*
+ * fbcon_bmove_physical_8() -- These functions are fast implementations
* fbcon_clear_physical_8() -- of original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later
*
@@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
}
}
+static __inline__ void ywrap_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+ if (p->yscroll >= p->vrows) /* Deal with wrap */
+ p->yscroll -= p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ywrap_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) /* Deal with wrap */
+ p->yscroll += p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll += count;
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ ops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll -= p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ p->yscroll -= p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) {
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll += p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+
+ if (p->yscroll < 0) {
+ p->yscroll += p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+{
+ unsigned short *s = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+ unsigned short attr = 1;
+
+ do {
+ c = scr_readw(s);
+ if (attr != (c & 0xff00)) {
+ attr = c & 0xff00;
+ if (s > start) {
+ fbcon_putcs(vc, start, s - start,
+ dy, x);
+ x += s - start;
+ start = s;
+ }
+ }
+ console_conditional_schedule();
+ s++;
+ } while (s < le);
+ if (s > start)
+ fbcon_putcs(vc, start, s - start, dy, x);
+ console_conditional_schedule();
+ dy++;
+ }
+}
+
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct fbcon_display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: We subtract two lines from these pointers */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
@@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_display *p = &fb_display[vc->vc_num];
+ int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
return true;
@@ -1466,32 +1704,291 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, t, b - t - count,
- count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ywrap_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_redraw_move(vc, p, 0, t, count);
+ ypan_up_redraw(vc, t, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b,
+ vc->vc_rows - b, b);
+ } else
+ fbcon_redraw_move(vc, p, t + count, b - t - count, t);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ypan_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_up:
+ fbcon_redraw(vc, p, t, b - t - count,
+ count * vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
+ break;
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, b - 1, b - t - count,
- -count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_down;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ywrap_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ypan_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
+ b - count);
+ ypan_down_redraw(vc, t, count);
+ if (t > 0)
+ fbcon_redraw_move(vc, p, count, t, 0);
+ } else
+ fbcon_redraw_move(vc, p, t, b - t - count, t + count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_down:
+ fbcon_redraw(vc, p, b - 1, b - t - count,
+ -count * vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
}
return false;
}
+
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+ /* Split blits that cross the physical y_wrap boundary.
+ * The pathological case involves 4 blits; it is better to use
+ * recursive code rather than an unrolled case.
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec()
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
+static void updatescrollmode_accel(struct fbcon_display *p,
+ struct fb_info *info,
+ struct vc_data *vc)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ struct fbcon_ops *ops = info->fbcon_par;
+ int cap = info->flags;
+ u16 t = 0;
+ int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
+ info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
+ info->var.xres_virtual);
+ int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
+ divides(ypan, vc->vc_font.height) && vyres > yres;
+ int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
+ divides(ywrap, vc->vc_font.height) &&
+ divides(vc->vc_font.height, vyres) &&
+ divides(vc->vc_font.height, yres);
+ int reading_fast = cap & FBINFO_READS_FAST;
+ int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+ int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+
+ if (good_wrap || good_pan) {
+ if (reading_fast || fast_copyarea)
+ p->scrollmode = good_wrap ?
+ SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
+ else
+ p->scrollmode = good_wrap ? SCROLL_REDRAW :
+ SCROLL_PAN_REDRAW;
+ } else {
+ if (reading_fast || (fast_copyarea && !fast_imageblit))
+ p->scrollmode = SCROLL_MOVE;
+ else
+ p->scrollmode = SCROLL_REDRAW;
+ }
+#endif
+}
+
static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -1507,6 +2004,9 @@ static void updatescrollmode(struct fbcon_display *p,
p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
if ((yres % fh) && (vyres % fh < yres % fh))
p->vrows--;
+
+ /* update scrollmode in case hardware acceleration is used */
+ updatescrollmode_accel(p, info, vc);
}
#define PITCH(w) (((w) + 7) >> 3)
@@ -1664,7 +2164,21 @@ static int fbcon_switch(struct vc_data *vc)
updatescrollmode(p, info, vc);
- scrollback_phys_max = 0;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_WRAP_MOVE:
+ scrollback_phys_max = p->vrows - vc->vc_rows;
+ break;
+ case SCROLL_PAN_MOVE:
+ case SCROLL_PAN_REDRAW:
+ scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
+ if (scrollback_phys_max < 0)
+ scrollback_phys_max = 0;
+ break;
+ default:
+ scrollback_phys_max = 0;
+ break;
+ }
+
scrollback_max = 0;
scrollback_current = 0;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index a00603b4451a..969d41ecede5 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -29,6 +29,9 @@ struct fbcon_display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ u_short scrollmode; /* Scroll Method, use fb_scrollmode() */
+#endif
u_short inverse; /* != 0 text black on white as default */
short yscroll; /* Hardware scrolling */
int vrows; /* number of virtual rows */
@@ -51,6 +54,8 @@ struct fbcon_display {
};
struct fbcon_ops {
+ void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
@@ -149,6 +154,73 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
+ /*
+ * Scroll Method
+ */
+
+/* There are several methods fbcon can use to move text around the screen:
+ *
+ * Operation Pan Wrap
+ *---------------------------------------------
+ * SCROLL_MOVE copyarea No No
+ * SCROLL_PAN_MOVE copyarea Yes No
+ * SCROLL_WRAP_MOVE copyarea No Yes
+ * SCROLL_REDRAW imageblit No No
+ * SCROLL_PAN_REDRAW imageblit Yes No
+ * SCROLL_WRAP_REDRAW imageblit No Yes
+ *
+ * (SCROLL_WRAP_REDRAW is not implemented yet)
+ *
+ * In general, fbcon will choose the best scrolling
+ * method based on the rule below:
+ *
+ * Pan/Wrap > accel imageblit > accel copyarea >
+ * soft imageblit > (soft copyarea)
+ *
+ * Exception to the rule: Pan + accel copyarea is
+ * preferred over Pan + accel imageblit.
+ *
+ * The above is typical for PCI/AGP cards. Unless
+ * overridden, fbcon will never use soft copyarea.
+ *
+ * If you need to override the above rule, set the
+ * appropriate flags in fb_info->flags. For example,
+ * to prefer copyarea over imageblit, set
+ * FBINFO_READS_FAST.
+ *
+ * Other notes:
+ * + use the hardware engine to move the text
+ * (hw-accelerated copyarea() and fillrect())
+ * + use hardware-supported panning on a large virtual screen
+ * + amifb can not only pan, but also wrap the display by N lines
+ * (i.e. visible line i = physical line (i+N) % yres).
+ * + read what's already rendered on the screen and
+ * write it in a different place (this is cfb_copyarea())
+ * + re-render the text to the screen
+ *
+ * Whether to use wrapping or panning can only be figured out at
+ * runtime (when we know whether our font height is a multiple
+ * of the pan/wrap step)
+ *
+ */
+
+#define SCROLL_MOVE 0x001
+#define SCROLL_PAN_MOVE 0x002
+#define SCROLL_WRAP_MOVE 0x003
+#define SCROLL_REDRAW 0x004
+#define SCROLL_PAN_REDRAW 0x005
+
+static inline u_short fb_scrollmode(struct fbcon_display *fb)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ return fb->scrollmode;
+#else
+ /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
+ return SCROLL_REDRAW;
+#endif
+}
+
+
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index ffa78936eaab..2789ace79634 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+
+ area.sx = sy * vc->vc_font.height;
+ area.sy = vyres - ((sx + width) * vc->vc_font.width);
+ area.dx = dy * vc->vc_font.height;
+ area.dy = vyres - ((dx + width) * vc->vc_font.width);
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
@@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
u32 yoffset;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
int err;
yoffset = (vyres - info->var.yres) - ops->var.xoffset;
@@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info)
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
+ ops->bmove = ccw_bmove;
ops->clear = ccw_clear;
ops->putcs = ccw_putcs;
ops->clear_margins = ccw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 92e5b7fb51ee..86a254c1b2b7 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sx = vxres - ((sy + height) * vc->vc_font.height);
+ area.sy = sx * vc->vc_font.width;
+ area.dx = vxres - ((dy + height) * vc->vc_font.height);
+ area.dy = dx * vc->vc_font.width;
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
static int cw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
u32 xoffset;
int err;
@@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info)
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
+ ops->bmove = cw_bmove;
ops->clear = cw_clear;
ops->putcs = cw_putcs;
ops->clear_margins = cw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index b528b2e54283..01cbe303b8a2 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -11,6 +11,15 @@
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
+#define GETVYRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \
+ (i)->var.yres : (i)->var.yres_virtual; })
+
+#define GETVXRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
+ (i)->var.xres : (i)->var.xres_virtual; })
+
+
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
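On the new GETVYRES/GETVXRES macros above: they select the virtual
resolution only when the scroll mode actually pans or wraps the display,
because SCROLL_REDRAW and SCROLL_MOVE never move the visible origin
(GETVXRES additionally falls back to the visible width when the hardware
reports no horizontal pan step). A hypothetical helper restating the
GETVYRES rule, assuming the fbcon.h definitions from earlier in this series:

    #include <linux/fb.h>
    #include "fbcon.h"      /* fb_scrollmode(), SCROLL_REDRAW, SCROLL_MOVE */

    static inline u32 effective_yres(struct fbcon_display *p,
                                     struct fb_info *info)
    {
            u16 mode = fb_scrollmode(p);

            /* Redraw/move modes only touch the visible screen ... */
            if (mode == SCROLL_REDRAW || mode == SCROLL_MOVE)
                    return info->var.yres;
            /* ... while pan/wrap modes address the whole virtual screen. */
            return info->var.yres_virtual;
    }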
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 09619bd8e021..23bc045769d0 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sy = vyres - ((sy + height) * vc->vc_font.height);
+ area.sx = vxres - ((sx + width) * vc->vc_font.width);
+ area.dy = vyres - ((dy + height) * vc->vc_font.height);
+ area.dx = vxres - ((dx + width) * vc->vc_font.width);
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
int err;
xoffset = vxres - info->var.xres - ops->var.xoffset;
@@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info)
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
+ ops->bmove = ud_bmove;
ops->clear = ud_clear;
ops->putcs = ud_putcs;
ops->clear_margins = ud_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 55d2bd0ce5c0..64843464c661 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt)
static void fb_cvt_print_name(struct fb_cvt_data *cvt)
{
u32 pixcount, pixcount_mod;
- int cnt = 255, offset = 0, read = 0;
- u8 *buf = kzalloc(256, GFP_KERNEL);
+ int size = 256;
+ int off = 0;
+ u8 *buf;
+ buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return;
@@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt)
pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
pixcount_mod /= 1000;
- read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
- cvt->xres, cvt->yres, cvt->refresh);
- offset += read;
- cnt -= read;
+ off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ",
+ cvt->xres, cvt->yres, cvt->refresh);
- if (cvt->status)
- snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
- "Pixel Image\n", pixcount, pixcount_mod);
- else {
- if (pixcount) {
- read = snprintf(buf+offset, cnt, "%d", pixcount);
- cnt -= read;
- offset += read;
- }
+ if (cvt->status) {
+ off += scnprintf(buf + off, size - off,
+ "Not a CVT standard - %d.%03d Mega Pixel Image\n",
+ pixcount, pixcount_mod);
+ } else {
+ if (pixcount)
+ off += scnprintf(buf + off, size - off, "%d", pixcount);
- read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
- cnt -= read;
- offset += read;
+ off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod);
if (cvt->aspect_ratio == 0)
- read = snprintf(buf+offset, cnt, "3");
+ off += scnprintf(buf + off, size - off, "3");
else if (cvt->aspect_ratio == 3)
- read = snprintf(buf+offset, cnt, "4");
+ off += scnprintf(buf + off, size - off, "4");
else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
- read = snprintf(buf+offset, cnt, "9");
+ off += scnprintf(buf + off, size - off, "9");
else if (cvt->aspect_ratio == 2)
- read = snprintf(buf+offset, cnt, "A");
- else
- read = 0;
- cnt -= read;
- offset += read;
-
- if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
- read = snprintf(buf+offset, cnt, "-R");
- cnt -= read;
- offset += read;
- }
+ off += scnprintf(buf + off, size - off, "A");
+
+ if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
+ off += scnprintf(buf + off, size - off, "-R");
}
printk(KERN_INFO "%s\n", buf);
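A brief note on the snprintf() -> scnprintf() conversion in fbcvt.c above:
scnprintf() returns the number of bytes actually written (0 once the
remaining space is exhausted), whereas snprintf() reports how many bytes
would have been written, so accumulating scnprintf() return values can
never advance the offset past the end of the buffer. A minimal sketch of
the pattern (function and variable names are illustrative only):

    #include <linux/kernel.h>       /* scnprintf() */

    static void build_name(char *buf, int size, int pixcount, int mod)
    {
            int off = 0;

            /* Each call writes at most size - off - 1 bytes and reports
             * what it actually wrote, so off stays inside the buffer. */
            off += scnprintf(buf + off, size - off, "%d", pixcount);
            off += scnprintf(buf + off, size - off, ".%03dM", mod);
    }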
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0fa7ede94fa6..8df3ac991e5a 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -396,18 +396,14 @@ static void fb_rotate_logo(struct fb_info *info, u8 *dst,
} else if (rotate == FB_ROTATE_CW) {
fb_rotate_logo_cw(image->data, dst, image->width,
image->height);
- tmp = image->width;
- image->width = image->height;
- image->height = tmp;
+ swap(image->width, image->height);
tmp = image->dy;
image->dy = image->dx;
image->dx = info->var.xres - image->width - tmp;
} else if (rotate == FB_ROTATE_CCW) {
fb_rotate_logo_ccw(image->data, dst, image->width,
image->height);
- tmp = image->width;
- image->width = image->height;
- image->height = tmp;
+ swap(image->width, image->height);
tmp = image->dx;
image->dx = image->dy;
image->dy = info->var.yres - image->height - tmp;
@@ -1160,6 +1156,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
ret = fbcon_set_con2fb_map_ioctl(argp);
break;
case FBIOBLANK:
+ if (arg > FB_BLANK_POWERDOWN)
+ return -EINVAL;
console_lock();
lock_fb_info(info);
ret = fb_blank(info, arg);
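On the FBIOBLANK change in fbmem.c above: the ioctl argument is the blank
level itself, and values above FB_BLANK_POWERDOWN are now rejected with
-EINVAL before the console lock is taken. A minimal user-space sketch of the
call (the device path is an example):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fb.h>

    int main(void)
    {
            int fd = open("/dev/fb0", O_RDWR);

            if (fd < 0)
                    return 1;
            /* Valid levels run from FB_BLANK_UNBLANK to FB_BLANK_POWERDOWN;
             * anything larger now fails with EINVAL. */
            ioctl(fd, FBIOBLANK, FB_BLANK_POWERDOWN);
            close(fd);
            return 0;
    }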
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 72af95053bcb..2768eff247ba 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -16,6 +16,21 @@
#include <asm/types.h>
#include "fbcon.h"
+static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_tilearea area;
+
+ area.sx = sx;
+ area.sy = sy;
+ area.dx = dx;
+ area.dy = dy;
+ area.height = height;
+ area.width = width;
+
+ info->tileops->fb_tilecopy(info, &area);
+}
+
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
+ ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 005ac3c17aa1..ae76a2111c77 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1354,10 +1354,9 @@ static int fb_probe(struct platform_device *device)
return PTR_ERR(da8xx_fb_reg_base);
tmp_lcdc_clk = devm_clk_get(&device->dev, "fck");
- if (IS_ERR(tmp_lcdc_clk)) {
- dev_err(&device->dev, "Can not get device clock\n");
- return PTR_ERR(tmp_lcdc_clk);
- }
+ if (IS_ERR(tmp_lcdc_clk))
+ return dev_err_probe(&device->dev, PTR_ERR(tmp_lcdc_clk),
+ "Can not get device clock\n");
pm_runtime_enable(&device->dev);
pm_runtime_get_sync(&device->dev);
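For the dev_err_probe() conversions in this series (da8xx-fb.c above and the
omap panel drivers further down): dev_err_probe() returns the error it is
given and prints the message at error level, except for -EPROBE_DEFER, where
it records the deferral reason instead of spamming the log. That is what
lets the old dev_err() + return pairs collapse into one statement. A minimal
sketch of the pattern (driver and clock names are illustrative):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_get_clock(struct platform_device *pdev,
                                 struct clk **clk)
    {
            *clk = devm_clk_get(&pdev->dev, "fck");
            if (IS_ERR(*clk))
                    /* Logs (or records a deferral reason) and returns
                     * PTR_ERR(*clk) in a single statement. */
                    return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
                                         "Can not get device clock\n");
            return 0;
    }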
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 23999df52739..c8e0ea27caf1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -287,8 +287,6 @@ struct hvfb_par {
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
-static uint screen_width_max = HVFB_WIDTH;
-static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
@@ -582,7 +580,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
int ret = 0;
unsigned long t;
u8 index;
- int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
@@ -613,13 +610,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
goto out;
}
- for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
- screen_width_max = max_t(unsigned int, screen_width_max,
- msg->resolution_resp.supported_resolution[i].width);
- screen_height_max = max_t(unsigned int, screen_height_max,
- msg->resolution_resp.supported_resolution[i].height);
- }
-
screen_width =
msg->resolution_resp.supported_resolution[index].width;
screen_height =
@@ -941,7 +931,7 @@ static void hvfb_get_option(struct fb_info *info)
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
- (x > screen_width_max || y > screen_height_max)) ||
+ (x * y * screen_depth / 8 > screen_fb_size)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -1194,8 +1184,8 @@ static int hvfb_probe(struct hv_device *hdev,
}
hvfb_get_option(info);
- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
- screen_width, screen_height, screen_depth);
+ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
+ screen_width, screen_height, screen_depth, screen_fb_size);
ret = hvfb_getmem(hdev, info);
if (ret) {
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index ad598257ab38..68288756ffff 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1083,6 +1083,8 @@ static int imxfb_remove(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
imxfb_disable_controller(fbi);
diff --git a/drivers/video/fbdev/kyro/STG4000InitDevice.c b/drivers/video/fbdev/kyro/STG4000InitDevice.c
index 21875d3c2dc2..edfa0a04854d 100644
--- a/drivers/video/fbdev/kyro/STG4000InitDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000InitDevice.c
@@ -124,7 +124,7 @@ u32 ProgramClock(u32 refClock,
u32 ulScore, ulPhaseScore, ulVcoScore;
u32 ulTmp = 0, ulVCO;
u32 ulScaleClockReq, ulMinClock, ulMaxClock;
- u32 ODValues[] = { 1, 2, 0 };
+ static const unsigned char ODValues[] = { 1, 2, 0 };
/* Translate clock in Hz */
coreClock *= 100; /* in Hz */
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 5c82611e93d9..236521b19daf 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1377,7 +1377,7 @@ static struct video_board vbG200 = {
.lowlevel = &matrox_G100
};
static struct video_board vbG200eW = {
- .maxvram = 0x800000,
+ .maxvram = 0x100000,
.maxdisplayable = 0x800000,
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index d40b806461ca..61aed7fc0b8d 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -132,7 +132,7 @@ static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy,
cmd[2] = (height << 16) | width;
i = 0;
- line = ptr = image->data;
+ line = image->data;
bytes = image->width;
while (i < height) {
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 061a105afb86..a9df8ee79810 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,8 @@ static int mmphw_probe(struct platform_device *pdev)
/* get clock */
ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
if (IS_ERR(ctrl->clk)) {
- dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
+ dev_err_probe(ctrl->dev, ret,
+ "unable to get clk %s\n", mi->clk_name);
ret = -ENOENT;
goto failed;
}
diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c
index d7994a173245..0b48965a6420 100644
--- a/drivers/video/fbdev/nvidia/nv_i2c.c
+++ b/drivers/video/fbdev/nvidia/nv_i2c.c
@@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
{
int rc;
- strcpy(chan->adapter.name, name);
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = i2c_class;
chan->adapter.algo_data = &chan->algo;
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index bfa4ed421148..da7e1457e58f 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -387,7 +387,7 @@ static int ocfb_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id ocfb_match[] = {
+static const struct of_device_id ocfb_match[] = {
{ .compatible = "opencores,ocfb", },
{},
};
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 4501e848a36f..afdb6aa48add 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -412,7 +412,7 @@ static void __init offb_init_fb(const char *name,
info = framebuffer_alloc(sizeof(u32) * 16, NULL);
- if (info == 0) {
+ if (!info) {
release_mem_region(res_start, res_size);
return;
}
diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c
index 8e54aae544a0..bbf871f9d862 100644
--- a/drivers/video/fbdev/omap/lcd_ams_delta.c
+++ b/drivers/video/fbdev/omap/lcd_ams_delta.c
@@ -131,18 +131,14 @@ static int ams_delta_panel_probe(struct platform_device *pdev)
int ret;
gpiod_vblen = devm_gpiod_get(&pdev->dev, "vblen", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_vblen)) {
- ret = PTR_ERR(gpiod_vblen);
- dev_err(&pdev->dev, "VBLEN GPIO request failed (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(gpiod_vblen))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpiod_vblen),
+ "VBLEN GPIO request failed\n");
gpiod_ndisp = devm_gpiod_get(&pdev->dev, "ndisp", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_ndisp)) {
- ret = PTR_ERR(gpiod_ndisp);
- dev_err(&pdev->dev, "NDISP GPIO request failed (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(gpiod_ndisp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpiod_ndisp),
+ "NDISP GPIO request failed\n");
#ifdef CONFIG_LCD_CLASS_DEVICE
lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL,
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index a75ae0c9b14c..03cff39d392d 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -570,14 +570,12 @@ static int mipid_spi_probe(struct spi_device *spi)
return 0;
}
-static int mipid_spi_remove(struct spi_device *spi)
+static void mipid_spi_remove(struct spi_device *spi)
{
struct mipid_device *md = dev_get_drvdata(&spi->dev);
mipid_disable(&md->panel);
kfree(md);
-
- return 0;
}
static struct spi_driver mipid_spi_driver = {
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index b495c09e6102..083388a4ceeb 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#include <linux/omap-dma.h>
@@ -1303,7 +1304,7 @@ static ssize_t omapfb_show_panel_name(struct device *dev,
{
struct omapfb_device *fbdev = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
+ return sysfs_emit(buf, "%s\n", fbdev->panel->name);
}
static ssize_t omapfb_show_bklight_level(struct device *dev,
@@ -1314,8 +1315,8 @@ static ssize_t omapfb_show_bklight_level(struct device *dev,
int r;
if (fbdev->panel->get_bklight_level) {
- r = snprintf(buf, PAGE_SIZE, "%d\n",
- fbdev->panel->get_bklight_level(fbdev->panel));
+ r = sysfs_emit(buf, "%d\n",
+ fbdev->panel->get_bklight_level(fbdev->panel));
} else
r = -ENODEV;
return r;
@@ -1348,8 +1349,8 @@ static ssize_t omapfb_show_bklight_max(struct device *dev,
int r;
if (fbdev->panel->get_bklight_level) {
- r = snprintf(buf, PAGE_SIZE, "%d\n",
- fbdev->panel->get_bklight_max(fbdev->panel));
+ r = sysfs_emit(buf, "%d\n",
+ fbdev->panel->get_bklight_max(fbdev->panel));
} else
r = -ENODEV;
return r;
@@ -1379,7 +1380,7 @@ static ssize_t omapfb_show_ctrl_name(struct device *dev,
{
struct omapfb_device *fbdev = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
+ return sysfs_emit(buf, "%s\n", fbdev->ctrl->name);
}
static struct device_attribute dev_attr_ctrl_name =
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index 2fa436475b40..c8ad3ef42bd3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -246,6 +246,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4b0793abdd84..a2c7c5cb1523 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+ return sysfs_emit(buf, "%d\n", errors);
}
static ssize_t dsicm_hw_revision_show(struct device *dev,
@@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+ return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
}
static ssize_t dsicm_store_ulps(struct device *dev,
@@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
t = ddata->ulps_enabled;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static ssize_t dsicm_store_ulps_timeout(struct device *dev,
@@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
t = ddata->ulps_timeout;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 1bec7a4422e8..3ce1f9d2e7c4 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -244,10 +244,9 @@ static int lb035q02_probe_of(struct spi_device *spi)
struct gpio_desc *gpio;
gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return PTR_ERR(gpio);
- }
+ if (IS_ERR(gpio))
+ return dev_err_probe(&spi->dev, PTR_ERR(gpio),
+ "failed to parse enable gpio\n");
ddata->enable_gpio = gpio;
@@ -316,7 +315,7 @@ err_gpio:
return r;
}
-static int lb035q02_panel_spi_remove(struct spi_device *spi)
+static void lb035q02_panel_spi_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = spi_get_drvdata(spi);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -328,8 +327,6 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi)
lb035q02_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id lb035q02_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index dff9ebbadfc0..be9910ff6e62 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -327,7 +327,7 @@ err_gpio:
return r;
}
-static int nec_8048_remove(struct spi_device *spi)
+static void nec_8048_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -341,8 +341,6 @@ static int nec_8048_remove(struct spi_device *spi)
nec_8048_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 602324c5c9f9..f1072c319de8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -211,10 +211,9 @@ static int sharp_ls_probe_of(struct platform_device *pdev)
int r;
ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
- if (IS_ERR(ddata->vcc)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(ddata->vcc);
- }
+ if (IS_ERR(ddata->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ddata->vcc),
+ "failed to get regulator\n");
/* lcd INI */
r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 8d8b5ff7d43c..c0965bee12c5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev,
int i;
if (!ddata->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+ return sysfs_emit(buf, "%s\n", cabc_modes[0]);
for (i = 0, len = 0;
len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
@@ -857,7 +857,7 @@ err_gpio:
return r;
}
-static int acx565akm_remove(struct spi_device *spi)
+static void acx565akm_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -874,8 +874,6 @@ static int acx565akm_remove(struct spi_device *spi)
acx565akm_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id acx565akm_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 595ebd8bd5dc..3c0f887d3092 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -425,7 +425,7 @@ err_reg:
return r;
}
-static int td028ttec1_panel_remove(struct spi_device *spi)
+static void td028ttec1_panel_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -439,8 +439,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
td028ttec1_panel_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id td028ttec1_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index afac1d9445aa..c0e4e0315b6b 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
+ return sysfs_emit(buf, "%d\n", ddata->vmirror);
}
static ssize_t tpo_td043_vmirror_store(struct device *dev,
@@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
+ return sysfs_emit(buf, "%d\n", ddata->mode);
}
static ssize_t tpo_td043_mode_store(struct device *dev,
@@ -517,8 +517,7 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
if (IS_ERR(ddata->vcc_reg)) {
- dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- r = PTR_ERR(ddata->vcc_reg);
+ r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg), "failed to get LCD VCC regulator\n");
goto err_regulator;
}
@@ -564,7 +563,7 @@ err_regulator:
return r;
}
-static int tpo_td043_remove(struct spi_device *spi)
+static void tpo_td043_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -580,8 +579,6 @@ static int tpo_td043_remove(struct spi_device *spi)
omap_dss_put_device(in);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 8f355d1caf86..bc5a44c2a144 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -265,6 +265,7 @@ static struct attribute *display_sysfs_attrs[] = {
&display_attr_wss.attr,
NULL
};
+ATTRIBUTE_GROUPS(display_sysfs);
static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -303,7 +304,7 @@ static const struct sysfs_ops display_sysfs_ops = {
static struct kobj_type display_ktype = {
.sysfs_ops = &display_sysfs_ops,
- .default_attrs = display_sysfs_attrs,
+ .default_groups = display_sysfs_groups,
};
int display_init_sysfs(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index 3ffb1fe4a38a..ba21c4a2633d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -457,6 +457,7 @@ static struct attribute *manager_sysfs_attrs[] = {
&manager_attr_cpr_coef.attr,
NULL
};
+ATTRIBUTE_GROUPS(manager_sysfs);
static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -495,7 +496,7 @@ static const struct sysfs_ops manager_sysfs_ops = {
static struct kobj_type manager_ktype = {
.sysfs_ops = &manager_sysfs_ops,
- .default_attrs = manager_sysfs_attrs,
+ .default_groups = manager_sysfs_groups,
};
int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 421dcb7564ad..601c0beb6de9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -390,6 +390,7 @@ static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_zorder.attr,
NULL
};
+ATTRIBUTE_GROUPS(overlay_sysfs);
static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -428,7 +429,7 @@ static const struct sysfs_ops overlay_sysfs_ops = {
static struct kobj_type overlay_ktype = {
.sysfs_ops = &overlay_sysfs_ops,
- .default_attrs = overlay_sysfs_attrs,
+ .default_groups = overlay_sysfs_groups,
};
int dss_overlay_kobj_init(struct omap_overlay *ovl,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index a3decc7fadde..afa688e754b9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1854,7 +1854,6 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
if (fbdev->auto_update_wq != NULL) {
- flush_workqueue(fbdev->auto_update_wq);
destroy_workqueue(fbdev->auto_update_wq);
fbdev->auto_update_wq = NULL;
}
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 47e6a1d0d229..e943300d23e8 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -593,8 +593,8 @@ static void pxa168fb_init_mode(struct fb_info *info,
static int pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
- struct fb_info *info = 0;
- struct pxa168fb_info *fbi = 0;
+ struct fb_info *info = NULL;
+ struct pxa168fb_info *fbi = NULL;
struct resource *res;
struct clk *clk;
int irq, ret;
@@ -606,10 +606,9 @@ static int pxa168fb_probe(struct platform_device *pdev)
}
clk = devm_clk_get(&pdev->dev, "LCDCLK");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get LCDCLK");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get LCDCLK");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -618,10 +617,8 @@ static int pxa168fb_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ if (irq < 0)
return -ENOENT;
- }
info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
if (info == NULL) {
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 4279e13a3b58..350b3139c863 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -606,17 +606,13 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
/* enable the clock */
priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to get clock\n");
/* request the IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no IRQ defined: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
0, DRV_NAME, priv);
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 3b134e1bbc38..fe3c8b6935cf 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1360,7 +1360,6 @@ static int s3c_fb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct s3c_fb_platdata *pd;
struct s3c_fb *sfb;
- struct resource *res;
int win;
int ret = 0;
u32 reg;
@@ -1392,18 +1391,17 @@ static int s3c_fb_probe(struct platform_device *pdev)
spin_lock_init(&sfb->slock);
sfb->bus_clk = devm_clk_get(dev, "lcd");
- if (IS_ERR(sfb->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(sfb->bus_clk);
- }
+ if (IS_ERR(sfb->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(sfb->bus_clk),
+ "failed to get bus clock\n");
clk_prepare_enable(sfb->bus_clk);
if (!sfb->variant.has_clksel) {
sfb->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(sfb->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(sfb->lcd_clk);
+ ret = dev_err_probe(dev, PTR_ERR(sfb->lcd_clk),
+ "failed to get lcd clock\n");
goto err_bus_clk;
}
@@ -1418,13 +1416,12 @@ static int s3c_fb_probe(struct platform_device *pdev)
goto err_lcd_clk;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to acquire irq resource\n");
+ sfb->irq_no = platform_get_irq(pdev, 0);
+ if (sfb->irq_no < 0) {
ret = -ENOENT;
goto err_lcd_clk;
}
- sfb->irq_no = res->start;
+
ret = devm_request_irq(dev, sfb->irq_no, s3c_fb_irq,
0, "s3c_fb", sfb);
if (ret) {
@@ -1810,4 +1807,3 @@ module_platform_driver(s3c_fb_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-fb");
diff --git a/drivers/video/fbdev/savage/savagefb.h b/drivers/video/fbdev/savage/savagefb.h
index 3314d5b6b43b..b6b8cc208293 100644
--- a/drivers/video/fbdev/savage/savagefb.h
+++ b/drivers/video/fbdev/savage/savagefb.h
@@ -195,7 +195,6 @@ struct savagefb_par {
struct savage_reg initial;
struct vgastate vgastate;
struct mutex open_lock;
- unsigned char *edid;
u32 pseudo_palette[16];
u32 open_count;
int paletteEnabled;
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 0ac750cc5ea1..8114c921ceb8 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2170,6 +2170,7 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct fb_info *info;
struct savagefb_par *par;
u_int h_sync, v_sync;
+ unsigned char __maybe_unused *edid;
int err, lpitch;
int video_len;
@@ -2212,9 +2213,9 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
INIT_LIST_HEAD(&info->modelist);
#if defined(CONFIG_FB_SAVAGE_I2C)
savagefb_create_i2c_busses(info);
- savagefb_probe_i2c_connector(info, &par->edid);
- fb_edid_to_monspecs(par->edid, &info->monspecs);
- kfree(par->edid);
+ savagefb_probe_i2c_connector(info, &edid);
+ fb_edid_to_monspecs(edid, &info->monspecs);
+ kfree(edid);
fb_videomode_to_modelist(info->monspecs.modedb,
info->monspecs.modedb_len,
&info->modelist);
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 266a5582f94d..742f62986b80 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -213,7 +213,7 @@ static void sisfb_search_mode(char *name, bool quiet)
/* This does some fuzzy mode naming detection */
if(sscanf(strbuf1, "%u %u %u %u", &xres, &yres, &depth, &rate) == 4) {
if((rate <= 32) || (depth > 32)) {
- j = rate; rate = depth; depth = j;
+ swap(rate, depth);
}
sprintf(strbuf, "%ux%ux%u", xres, yres, depth);
nameptr = strbuf;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 0fe922f726e9..bcacfb6934fa 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
}
/**
- * xxxfb_copyarea - OBSOLETE function.
+ * xxxfb_copyarea - REQUIRED function. Can use generic routines if
+ * non accelerated hardware and packed pixel based.
* Copies one area of the screen to another area.
- * Will be deleted in a future version
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
- * This drawing operation copied a rectangular area from one area of the
+ * This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
@@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = {
.fb_setcolreg = xxxfb_setcolreg,
.fb_blank = xxxfb_blank,
.fb_pan_display = xxxfb_pan_display,
- .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
- .fb_copyarea = xxxfb_copyarea, /* Obsolete */
- .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
+ .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
+ .fb_copyarea = xxxfb_copyarea, /* Needed !!! */
+ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 0dbc6bf8268a..092a1caa1208 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
if (count + p > total_size)
count = total_size - p;
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
- for (i = c >> 2; i--;) {
- *dst = fb_readl(src++);
- *dst = big_swap(*dst);
+ for (i = (c + 3) >> 2; i--;) {
+ u32 val;
+
+ val = fb_readl(src);
+ *dst = big_swap(val);
+ src++;
dst++;
}
- if (c & 3) {
- u8 *dst8 = (u8 *)dst;
- u8 __iomem *src8 = (u8 __iomem *)src;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- *dst8++ = fb_readb(++src8);
- } else {
- *dst8++ = fb_readb(--src8);
- src8 += 2;
- }
- }
- src = (u32 __iomem *)src8;
- }
if (copy_to_user(buf, buffer, c)) {
err = -EFAULT;
@@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
count = total_size - p;
}
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
break;
}
- for (i = c >> 2; i--;) {
- fb_writel(big_swap(*src), dst++);
+ for (i = (c + 3) >> 2; i--;) {
+ fb_writel(big_swap(*src), dst);
+ dst++;
src++;
}
- if (c & 3) {
- u8 *src8 = (u8 *)src;
- u8 __iomem *dst8 = (u8 __iomem *)dst;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- fb_writeb(*src8++, ++dst8);
- } else {
- fb_writeb(*src8++, --dst8);
- dst8 += 2;
- }
- }
- dst = (u32 __iomem *)dst8;
- }
*ppos += c;
buf += c;
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index bfac3ee4a642..28768c272b73 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
info->par = dev;
info->pseudo_palette = dev->pseudo_palette;
info->fbops = &ufx_ops;
+ INIT_LIST_HEAD(&info->modelist);
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
@@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
INIT_DELAYED_WORK(&dev->free_framebuffer_work,
ufx_free_framebuffer_work);
- INIT_LIST_HEAD(&info->modelist);
-
retval = ufx_reg_read(dev, 0x3000, &id_rev);
check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 1e2f71c2f8a8..c6d5df31111d 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -658,9 +658,8 @@ static int ssd1307fb_probe(struct i2c_client *client)
par->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(par->reset)) {
- dev_err(dev, "failed to get reset gpio: %ld\n",
- PTR_ERR(par->reset));
- ret = PTR_ERR(par->reset);
+ ret = dev_err_probe(dev, PTR_ERR(par->reset),
+ "failed to get reset gpio\n");
goto fb_alloc_error;
}
@@ -670,7 +669,7 @@ static int ssd1307fb_probe(struct i2c_client *client)
if (ret == -ENODEV) {
par->vbat_reg = NULL;
} else {
- dev_err(dev, "failed to get VBAT regulator: %d\n", ret);
+ dev_err_probe(dev, ret, "failed to get VBAT regulator\n");
goto fb_alloc_error;
}
}
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 265865610edc..bebb2eea6448 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1041,6 +1041,47 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
SETUP_FB(fb);
}
+#define ARTIST_VRAM_SIZE 0x000804
+#define ARTIST_VRAM_SRC 0x000808
+#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04
+#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00
+#define ARTIST_SRC_BM_ACCESS 0x018008
+#define ARTIST_FGCOLOR 0x018010
+#define ARTIST_BGCOLOR 0x018014
+#define ARTIST_BITMAP_OP 0x01801c
+
+static void
+stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (rect->rop != ROP_COPY)
+ return cfb_fillrect(info, rect);
+
+ SETUP_HW(fb);
+
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP);
+ WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS);
+ NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000);
+ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color);
+ WRITE_WORD(0, fb, ARTIST_BGCOLOR);
+
+ NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy));
+ SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height));
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
@@ -1105,7 +1146,7 @@ static const struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = stifb_fillrect,
.fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1297,7 +1338,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
goto out_err0;
}
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
+ info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index b9cdd02c1000..90f48b71fd8f 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1426,7 +1426,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_rendered));
}
@@ -1434,7 +1434,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_identical));
}
@@ -1442,7 +1442,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_sent));
}
@@ -1450,7 +1450,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->cpu_kcycles_used));
}
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 088b962076b5..beec5c8d4d08 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -543,7 +543,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
/* Get panel table Pointer */
panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
viafb_fill_var_timing_info(&panel_var, panel_crt_table);
- DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n");
+ DEBUG_MSG(KERN_INFO "below viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info);
clock = PICOS2KHZ(panel_crt_table->pixclock) * 1000;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 22deb340a048..2d67c92c5774 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1939,8 +1939,12 @@ static int __init viafb_setup(void)
if (!strncmp(this_opt, "viafb_mode1=", 12)) {
viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL);
+ if (!viafb_mode1)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_mode=", 11)) {
viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL);
+ if (!viafb_mode)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_bpp1=", 11)) {
if (kstrtouint(this_opt + 11, 0, &viafb_bpp1) < 0)
return -EINVAL;
@@ -1969,6 +1973,8 @@ static int __init viafb_setup(void)
return -EINVAL;
} else if (!strncmp(this_opt, "viafb_active_dev=", 17)) {
viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL);
+ if (!viafb_active_dev)
+ return -ENOMEM;
} else if (!strncmp(this_opt,
"viafb_display_hardware_layout=", 30)) {
if (kstrtoint(this_opt + 30, 0,
@@ -1995,8 +2001,12 @@ static int __init viafb_setup(void)
return -EINVAL;
} else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) {
viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL);
+ if (!viafb_lcd_port)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) {
viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL);
+ if (!viafb_dvi_port)
+ return -ENOMEM;
}
}
return 0;
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index d96ab28f8ce4..4e641a780726 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -770,12 +770,18 @@ out:
fb_dealloc_cmap(&info->cmap);
kfree(info->pseudo_palette);
}
- if (remapped_fbuf != NULL)
+ if (remapped_fbuf != NULL) {
iounmap(remapped_fbuf);
- if (remapped_regs != NULL)
+ remapped_fbuf = NULL;
+ }
+ if (remapped_regs != NULL) {
iounmap(remapped_regs);
- if (remapped_base != NULL)
+ remapped_regs = NULL;
+ }
+ if (remapped_base != NULL) {
iounmap(remapped_base);
+ remapped_base = NULL;
+ }
if (info)
framebuffer_release(info);
return err;
@@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev)
fb_dealloc_cmap(&info->cmap);
iounmap(remapped_base);
+ remapped_base = NULL;
iounmap(remapped_regs);
+ remapped_regs = NULL;
iounmap(remapped_fbuf);
+ remapped_fbuf = NULL;
framebuffer_release(info);
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 8061e8ef449f..121b9293c737 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -13,6 +13,17 @@ menuconfig VIRT_DRIVERS
if VIRT_DRIVERS
+config VMGENID
+ tristate "Virtual Machine Generation ID driver"
+ default y
+ depends on ACPI
+ help
+ Say Y here to use the hypervisor-provided Virtual Machine Generation ID
+ to reseed the RNG when the VM is cloned. This is highly recommended if
+ you intend to do any rollback / cloning / snapshotting of VMs.
+
+ Prefer Y to M so that this protection is activated very early.
+
config FSL_HV_MANAGER
tristate "Freescale hypervisor management driver"
depends on FSL_SOC
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index 3e272ea60cd9..108d0ffcc9aa 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
+obj-$(CONFIG_VMGENID) += vmgenid.o
obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c
new file mode 100644
index 000000000000..0ae1a39f2e28
--- /dev/null
+++ b/drivers/virt/vmgenid.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * The "Virtual Machine Generation ID" is exposed via ACPI and changes when a
+ * virtual machine forks or is cloned. This driver exists for shepherding that
+ * information to random.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/random.h>
+
+ACPI_MODULE_NAME("vmgenid");
+
+enum { VMGENID_SIZE = 16 };
+
+struct vmgenid_state {
+ u8 *next_id;
+ u8 this_id[VMGENID_SIZE];
+};
+
+static int vmgenid_add(struct acpi_device *device)
+{
+ struct acpi_buffer parsed = { ACPI_ALLOCATE_BUFFER };
+ struct vmgenid_state *state;
+ union acpi_object *obj;
+ phys_addr_t phys_addr;
+ acpi_status status;
+ int ret = 0;
+
+ state = devm_kmalloc(&device->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ status = acpi_evaluate_object(device->handle, "ADDR", NULL, &parsed);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating ADDR"));
+ return -ENODEV;
+ }
+ obj = parsed.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 2 ||
+ obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ phys_addr = (obj->package.elements[0].integer.value << 0) |
+ (obj->package.elements[1].integer.value << 32);
+ state->next_id = devm_memremap(&device->dev, phys_addr, VMGENID_SIZE, MEMREMAP_WB);
+ if (IS_ERR(state->next_id)) {
+ ret = PTR_ERR(state->next_id);
+ goto out;
+ }
+
+ memcpy(state->this_id, state->next_id, sizeof(state->this_id));
+ add_device_randomness(state->this_id, sizeof(state->this_id));
+
+ device->driver_data = state;
+
+out:
+ ACPI_FREE(parsed.pointer);
+ return ret;
+}
+
+static void vmgenid_notify(struct acpi_device *device, u32 event)
+{
+ struct vmgenid_state *state = acpi_driver_data(device);
+ u8 old_id[VMGENID_SIZE];
+
+ memcpy(old_id, state->this_id, sizeof(old_id));
+ memcpy(state->this_id, state->next_id, sizeof(state->this_id));
+ if (!memcmp(old_id, state->this_id, sizeof(old_id)))
+ return;
+ add_vmfork_randomness(state->this_id, sizeof(state->this_id));
+}
+
+static const struct acpi_device_id vmgenid_ids[] = {
+ { "VM_GEN_COUNTER", 0 },
+ { }
+};
+
+static struct acpi_driver vmgenid_driver = {
+ .name = "vmgenid",
+ .ids = vmgenid_ids,
+ .owner = THIS_MODULE,
+ .ops = {
+ .add = vmgenid_add,
+ .notify = vmgenid_notify
+ }
+};
+
+module_acpi_driver(vmgenid_driver);
+
+MODULE_DEVICE_TABLE(acpi, vmgenid_ids);
+MODULE_DESCRIPTION("Virtual Machine Generation ID");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 34f80b7a8a64..492fc26f0b65 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -105,7 +105,6 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- default m
depends on X86_64
depends on VIRTIO
depends on MEMORY_HOTPLUG
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 00ac9db792a4..22f15f444f75 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status)
}
EXPORT_SYMBOL_GPL(virtio_add_status);
-int virtio_finalize_features(struct virtio_device *dev)
+/* Do some validation, then set FEATURES_OK */
+static int virtio_features_ok(struct virtio_device *dev)
{
- int ret = dev->config->finalize_features(dev);
unsigned status;
+ int ret;
might_sleep();
- if (ret)
- return ret;
ret = arch_has_restricted_virtio_memory_access();
if (ret) {
@@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev)
}
return 0;
}
-EXPORT_SYMBOL_GPL(virtio_finalize_features);
+/**
+ * virtio_reset_device - quiesce device for removal
+ * @dev: the device to reset
+ *
+ * Prevents device from sending interrupts and accessing memory.
+ *
+ * Generally used for cleanup during driver / device removal.
+ *
+ * Once this has been invoked, caller must ensure that
+ * virtqueue_notify / virtqueue_kick are not in progress.
+ *
+ * Note: this guarantees that vq callbacks are not in progress, however caller
+ * is responsible for preventing access from other contexts, such as a system
+ * call/workqueue/bh. Invoking virtio_break_device then flushing any such
+ * contexts is one way to handle that.
+ */
void virtio_reset_device(struct virtio_device *dev)
{
dev->config->reset(dev);
@@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d)
driver_features_legacy = driver_features;
}
- /*
- * Some devices detect legacy solely via F_VERSION_1. Write
- * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
- * these when needed.
- */
- if (drv->validate && !virtio_legacy_is_little_endian()
- && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
- dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
- dev->config->finalize_features(dev);
- }
-
if (device_features & (1ULL << VIRTIO_F_VERSION_1))
dev->features = driver_features & device_features;
else
@@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d)
if (device_features & (1ULL << i))
__virtio_set_bit(dev, i);
+ err = dev->config->finalize_features(dev);
+ if (err)
+ goto err;
+
if (drv->validate) {
+ u64 features = dev->features;
+
err = drv->validate(dev);
if (err)
goto err;
+
+ /* Did validation change any features? Then write them again. */
+ if (features != dev->features) {
+ err = dev->config->finalize_features(dev);
+ if (err)
+ goto err;
+ }
}
- err = virtio_finalize_features(dev);
+ err = virtio_features_ok(dev);
if (err)
goto err;
@@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev)
/* We have a driver! */
virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
- ret = virtio_finalize_features(dev);
+ ret = dev->config->finalize_features(dev);
+ if (ret)
+ goto err;
+
+ ret = virtio_features_ok(dev);
if (ret)
goto err;
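
[Editorial aside, not part of the patch] The virtio_reset_device() kernel-doc added above spells out an ordering contract for removal paths. A minimal sketch of what that ordering looks like in a driver, using only the documented core API; the example_remove() name and the priv allocation are hypothetical:

	static void example_remove(struct virtio_device *vdev)
	{
		virtio_reset_device(vdev);	/* no further interrupts or DMA from the device */
		/* flush any driver work that might still kick a virtqueue here */
		vdev->config->del_vqs(vdev);	/* only then tear down the virtqueues */
		kfree(vdev->priv);		/* hypothetical per-device state */
	}
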
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 38becd8d578c..e7d6b679596d 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2476,13 +2476,10 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
/*
- * We want subblocks to span at least MAX_ORDER_NR_PAGES and
- * pageblock_nr_pages pages. This:
- * - Is required for now for alloc_contig_range() to work reliably -
- * it doesn't properly handle smaller granularity on ZONE_NORMAL.
+ * TODO: once alloc_contig_range() works reliably with pageblock
+ * granularity on ZONE_NORMAL, use pageblock_nr_pages instead.
*/
- sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
- pageblock_nr_pages) * PAGE_SIZE;
+ sb_size = PAGE_SIZE * MAX_ORDER_NR_PAGES;
sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
if (sb_size < memory_block_size_bytes() && !force_bbm) {
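
[Editorial note, not part of the patch] On the sb_size simplification above: assuming x86-64 defaults (4 KiB pages, MAX_ORDER = 11, hence MAX_ORDER_NR_PAGES = 1024 and pageblock_nr_pages = 512), the old max_t() expression and the new PAGE_SIZE * MAX_ORDER_NR_PAGES both evaluate to 4 MiB, so behaviour is unchanged there; the TODO only pays off once alloc_contig_range() copes with 2 MiB pageblock granularity on ZONE_NORMAL.
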
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 7767a7f0119b..76504559bc25 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return vdpa_set_features(vdpa, vdev->features, false);
+ return vdpa_set_features(vdpa, vdev->features);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 3fa40c723e8e..edb0acd0b832 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -169,20 +169,14 @@ undo:
__del_gref(gref);
}
- /* It's possible for the target domain to map the just-allocated grant
- * references by blindly guessing their IDs; if this is done, then
- * __del_gref will leave them in the queue_gref list. They need to be
- * added to the global list so that we can free them when they are no
- * longer referenced.
- */
- if (unlikely(!list_empty(&queue_gref)))
- list_splice_tail(&queue_gref, &gref_list);
mutex_unlock(&gref_mutex);
return rc;
}
static void __del_gref(struct gntalloc_gref *gref)
{
+ unsigned long addr;
+
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
uint8_t *tmp = kmap(gref->page);
tmp[gref->notify.pgoff] = 0;
@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;
if (gref->gref_id) {
- if (gnttab_query_foreign_access(gref->gref_id))
- return;
-
- if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
- return;
-
- gnttab_free_grant_reference(gref->gref_id);
+ if (gref->page) {
+ addr = (unsigned long)page_to_virt(gref->page);
+ gnttab_end_foreign_access(gref->gref_id, 0, addr);
+ } else
+ gnttab_free_grant_reference(gref->gref_id);
}
gref_size--;
list_del(&gref->next_gref);
- if (gref->page)
- __free_page(gref->page);
-
kfree(gref);
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 3729bea0c989..5c83d41766c8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -134,12 +134,9 @@ struct gnttab_ops {
*/
unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
/*
- * Query the status of a grant entry. Ref parameter is reference of
- * queried grant entry, return value is the status of queried entry.
- * Detailed status(writing/reading) can be gotten from the return value
- * by bit operations.
+ * Read the frame number related to a given grant reference.
*/
- int (*query_foreign_access)(grant_ref_t ref);
+ unsigned long (*read_frame)(grant_ref_t ref);
};
struct unmap_refs_callback_data {
@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
-{
- return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
-}
-
-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
-{
- return grstatus[ref] & (GTF_reading|GTF_writing);
-}
-
-int gnttab_query_foreign_access(grant_ref_t ref)
-{
- return gnttab_interface->query_foreign_access(ref);
-}
-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
+{
+ return gnttab_shared.v1[ref].frame;
+}
+
+static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
+{
+ return gnttab_shared.v2[ref].full_page.frame;
+}
+
struct deferred_entry {
struct list_head list;
grant_ref_t ref;
@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
put_free_entry(entry->ref);
- if (entry->page) {
- pr_debug("freeing g.e. %#x (pfn %#lx)\n",
- entry->ref, page_to_pfn(entry->page));
- put_page(entry->page);
- } else
- pr_info("freeing g.e. %#x\n", entry->ref);
+ pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ put_page(entry->page);
kfree(entry);
entry = NULL;
} else {
@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
struct page *page)
{
- struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ struct deferred_entry *entry;
+ gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
const char *what = KERN_WARNING "leaking";
+ entry = kmalloc(sizeof(*entry), gfp);
+ if (!page) {
+ unsigned long gfn = gnttab_interface->read_frame(ref);
+
+ page = pfn_to_page(gfn_to_pfn(gfn));
+ get_page(page);
+ }
+
if (entry) {
unsigned long flags;
@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
what, ref, page ? page_to_pfn(page) : -1);
}
+int gnttab_try_end_foreign_access(grant_ref_t ref)
+{
+ int ret = _gnttab_end_foreign_access_ref(ref, 0);
+
+ if (ret)
+ put_free_entry(ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
+
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
- if (gnttab_end_foreign_access_ref(ref, readonly)) {
- put_free_entry(ref);
+ if (gnttab_try_end_foreign_access(ref)) {
if (page != 0)
put_page(virt_to_page(page));
} else
@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
.update_entry = gnttab_update_entry_v1,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
- .query_foreign_access = gnttab_query_foreign_access_v1,
+ .read_frame = gnttab_read_frame_v1,
};
static const struct gnttab_ops gnttab_v2_ops = {
@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
.update_entry = gnttab_update_entry_v2,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
- .query_foreign_access = gnttab_query_foreign_access_v2,
+ .read_frame = gnttab_read_frame_v2,
};
static bool gnttab_need_v2(void)
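
[Editorial aside, not part of the patch] With the grant-table rework above, gnttab_end_foreign_access() defers the free internally whenever the other domain still has the page mapped (using the new read_frame op to locate and pin the backing page), so a frontend hands the page over rather than freeing it itself. A minimal sketch under that assumption; example_ring_free(), ring_ref and ring_page are placeholder names:

	static void example_ring_free(grant_ref_t ring_ref, unsigned long ring_page)
	{
		/*
		 * Revokes the grant and frees the page when the peer is done
		 * with it; if the peer still maps it, the core keeps the page
		 * and defers the free, so the caller must not reuse it.
		 */
		gnttab_end_foreign_access(ring_ref, 0 /* read/write */, ring_page);
	}
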
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 2c890f4f2cbc..72d4e3f193af 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -264,7 +264,7 @@ struct xen_device_domain_owner {
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+static LIST_HEAD(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 3c9ae156b597..0ca351f30a6d 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
if (!map->active.ring)
return;
- free_pages((unsigned long)map->active.data.in,
- map->active.ring->ring_order);
+ free_pages_exact(map->active.data.in,
+ PAGE_SIZE << map->active.ring->ring_order);
free_page((unsigned long)map->active.ring);
}
@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
goto out;
map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
+ bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
+ GFP_KERNEL | __GFP_ZERO);
if (!bytes)
goto out;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e8bed1cb76ba..df6890681231 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
unsigned int nr_pages, grant_ref_t *grefs)
{
int err;
- int i, j;
+ unsigned int i;
+ grant_ref_t gref_head;
+
+ err = gnttab_alloc_grant_references(nr_pages, &gref_head);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "granting access to ring page");
+ return err;
+ }
for (i = 0; i < nr_pages; i++) {
unsigned long gfn;
@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
else
gfn = virt_to_gfn(vaddr);
- err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
- if (err < 0) {
- xenbus_dev_fatal(dev, err,
- "granting access to ring page");
- goto fail;
- }
- grefs[i] = err;
+ grefs[i] = gnttab_claim_grant_reference(&gref_head);
+ gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+ gfn, 0);
vaddr = vaddr + XEN_PAGE_SIZE;
}
return 0;
-
-fail:
- for (j = 0; j < i; j++)
- gnttab_end_foreign_access_ref(grefs[j], 0);
- return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);